2 * This file is part of PowerDNS or dnsdist.
3 * Copyright -- PowerDNS.COM B.V. and its contributors
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * In addition, for the avoidance of any doubt, permission is granted to
10 * link this program with OpenSSL and to (re)distribute the binaries
11 * produced as the result of such linking.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
29 #include <netinet/tcp.h>
31 #include <sys/resource.h>
34 #if defined (__OpenBSD__) || defined(__NetBSD__)
35 #include <readline/readline.h>
37 #include <editline/readline.h>
41 #include <systemd/sd-daemon.h>
45 #include "dnsdist-cache.hh"
46 #include "dnsdist-console.hh"
47 #include "dnsdist-ecs.hh"
48 #include "dnsdist-lua.hh"
49 #include "dnsdist-rings.hh"
50 #include "dnsdist-secpoll.hh"
51 #include "dnsdist-xpf.hh"
54 #include "delaypipe.hh"
57 #include "dnsparser.hh"
58 #include "dnswriter.hh"
59 #include "ednsoptions.hh"
63 #include "sodcrypto.hh"
65 #include "threadname.hh"
69 Receiver is currently single threaded
70 not *that* bad actually, but now that we are thread safe, might want to scale
74 Set of Rules, if one matches, it leads to an Action
75 Both rules and actions could conceivably be Lua based.
76 On the C++ side, both could be inherited from a class Rule and a class Action,
77 on the Lua side we can't do that. */
83 struct DNSDistStats g_stats
;
84 MetricDefinitionStorage g_metricDefinitions
;
86 uint16_t g_maxOutstanding
{std::numeric_limits
<uint16_t>::max()};
87 bool g_verboseHealthChecks
{false};
88 uint32_t g_staleCacheEntriesTTL
{0};
90 bool g_allowEmptyResponse
{false};
92 GlobalStateHolder
<NetmaskGroup
> g_ACL
;
93 string g_outputBuffer
;
95 std::vector
<std::shared_ptr
<TLSFrontend
>> g_tlslocals
;
96 std::vector
<std::shared_ptr
<DOHFrontend
>> g_dohlocals
;
97 std::vector
<std::shared_ptr
<DNSCryptContext
>> g_dnsCryptLocals
;
99 shared_ptr
<BPFFilter
> g_defaultBPFFilter
;
100 std::vector
<std::shared_ptr
<DynBPFFilter
> > g_dynBPFFilters
;
101 #endif /* HAVE_EBPF */
102 std::vector
<std::unique_ptr
<ClientState
>> g_frontends
;
103 GlobalStateHolder
<pools_t
> g_pools
;
104 size_t g_udpVectorSize
{1};
106 bool g_snmpEnabled
{false};
107 bool g_snmpTrapsEnabled
{false};
108 DNSDistSNMPAgent
* g_snmpAgent
{nullptr};
110 /* UDP: the grand design. Per socket we listen on for incoming queries there is one thread.
111 Then we have a bunch of connected sockets for talking to downstream servers.
112 We send directly to those sockets.
114 For the return path, per downstream server we have a thread that listens to responses.
116 Per socket there is an array of 2^16 states, when we send out a packet downstream, we note
117 there the original requestor and the original id. The new ID is the offset in the array.
119 When an answer comes in on a socket, we look up the offset by the id, and lob it to the
122 IDs are assigned by atomic increments of the socket offset.
125 GlobalStateHolder
<vector
<DNSDistRuleAction
> > g_rulactions
;
126 GlobalStateHolder
<vector
<DNSDistResponseRuleAction
> > g_resprulactions
;
127 GlobalStateHolder
<vector
<DNSDistResponseRuleAction
> > g_cachehitresprulactions
;
128 GlobalStateHolder
<vector
<DNSDistResponseRuleAction
> > g_selfansweredresprulactions
;
133 GlobalStateHolder
<servers_t
> g_dstates
;
134 GlobalStateHolder
<NetmaskTree
<DynBlock
>> g_dynblockNMG
;
135 GlobalStateHolder
<SuffixMatchTree
<DynBlock
>> g_dynblockSMT
;
136 DNSAction::Action g_dynBlockAction
= DNSAction::Action::Drop
;
137 int g_tcpRecvTimeout
{2};
138 int g_tcpSendTimeout
{2};
141 bool g_servFailOnNoPolicy
{false};
142 bool g_truncateTC
{false};
143 bool g_fixupCase
{false};
144 bool g_preserveTrailingData
{false};
145 bool g_roundrobinFailOnNoServer
{false};
147 std::set
<std::string
> g_capabilitiesToRetain
;
149 static void truncateTC(char* packet
, uint16_t* len
, size_t responseSize
, unsigned int consumed
)
152 bool hadEDNS
= false;
153 uint16_t payloadSize
= 0;
156 if (g_addEDNSToSelfGeneratedResponses
) {
157 hadEDNS
= getEDNSUDPPayloadSizeAndZ(packet
, *len
, &payloadSize
, &z
);
160 *len
=static_cast<uint16_t>(sizeof(dnsheader
)+consumed
+DNS_TYPE_SIZE
+DNS_CLASS_SIZE
);
161 struct dnsheader
* dh
= reinterpret_cast<struct dnsheader
*>(packet
);
162 dh
->ancount
= dh
->arcount
= dh
->nscount
= 0;
165 addEDNS(dh
, *len
, responseSize
, z
& EDNS_HEADER_FLAG_DO
, payloadSize
, 0);
177 ComboAddress destination
;
178 ComboAddress origDest
;
182 if(origDest
.sin4
.sin_family
== 0) {
183 res
= sendto(fd
, packet
.c_str(), packet
.size(), 0, (struct sockaddr
*)&destination
, destination
.getSocklen());
186 res
= sendfromto(fd
, packet
.c_str(), packet
.size(), 0, origDest
, destination
);
190 vinfolog("Error sending delayed response to %s: %s", destination
.toStringWithPort(), strerror(err
));
195 DelayPipe
<DelayedPacket
>* g_delay
= nullptr;
197 void doLatencyStats(double udiff
)
199 if(udiff
< 1000) ++g_stats
.latency0_1
;
200 else if(udiff
< 10000) ++g_stats
.latency1_10
;
201 else if(udiff
< 50000) ++g_stats
.latency10_50
;
202 else if(udiff
< 100000) ++g_stats
.latency50_100
;
203 else if(udiff
< 1000000) ++g_stats
.latency100_1000
;
204 else ++g_stats
.latencySlow
;
205 g_stats
.latencySum
+= udiff
/ 1000;
207 auto doAvg
= [](double& var
, double n
, double weight
) {
208 var
= (weight
-1) * var
/weight
+ n
/weight
;
211 doAvg(g_stats
.latencyAvg100
, udiff
, 100);
212 doAvg(g_stats
.latencyAvg1000
, udiff
, 1000);
213 doAvg(g_stats
.latencyAvg10000
, udiff
, 10000);
214 doAvg(g_stats
.latencyAvg1000000
, udiff
, 1000000);
217 bool responseContentMatches(const char* response
, const uint16_t responseLen
, const DNSName
& qname
, const uint16_t qtype
, const uint16_t qclass
, const ComboAddress
& remote
, unsigned int& consumed
)
219 if (responseLen
< sizeof(dnsheader
)) {
223 const struct dnsheader
* dh
= reinterpret_cast<const struct dnsheader
*>(response
);
224 if (dh
->qdcount
== 0) {
225 if ((dh
->rcode
!= RCode::NoError
&& dh
->rcode
!= RCode::NXDomain
) || g_allowEmptyResponse
) {
229 ++g_stats
.nonCompliantResponses
;
234 uint16_t rqtype
, rqclass
;
237 rqname
=DNSName(response
, responseLen
, sizeof(dnsheader
), false, &rqtype
, &rqclass
, &consumed
);
239 catch(const std::exception
& e
) {
240 if(responseLen
> 0 && static_cast<size_t>(responseLen
) > sizeof(dnsheader
)) {
241 infolog("Backend %s sent us a response with id %d that did not parse: %s", remote
.toStringWithPort(), ntohs(dh
->id
), e
.what());
243 ++g_stats
.nonCompliantResponses
;
247 if (rqtype
!= qtype
|| rqclass
!= qclass
|| rqname
!= qname
) {
254 static void restoreFlags(struct dnsheader
* dh
, uint16_t origFlags
)
256 static const uint16_t rdMask
= 1 << FLAGS_RD_OFFSET
;
257 static const uint16_t cdMask
= 1 << FLAGS_CD_OFFSET
;
258 static const uint16_t restoreFlagsMask
= UINT16_MAX
& ~(rdMask
| cdMask
);
259 uint16_t * flags
= getFlagsFromDNSHeader(dh
);
260 /* clear the flags we are about to restore */
261 *flags
&= restoreFlagsMask
;
262 /* only keep the flags we want to restore */
263 origFlags
&= ~restoreFlagsMask
;
264 /* set the saved flags as they were */
268 static bool fixUpQueryTurnedResponse(DNSQuestion
& dq
, const uint16_t origFlags
)
270 restoreFlags(dq
.dh
, origFlags
);
272 return addEDNSToQueryTurnedResponse(dq
);
275 static bool fixUpResponse(char** response
, uint16_t* responseLen
, size_t* responseSize
, const DNSName
& qname
, uint16_t origFlags
, bool ednsAdded
, bool ecsAdded
, std::vector
<uint8_t>& rewrittenResponse
, uint16_t addRoom
, bool* zeroScope
)
277 if (*responseLen
< sizeof(dnsheader
)) {
281 struct dnsheader
* dh
= reinterpret_cast<struct dnsheader
*>(*response
);
282 restoreFlags(dh
, origFlags
);
284 if (*responseLen
== sizeof(dnsheader
)) {
289 string realname
= qname
.toDNSString();
290 if (*responseLen
>= (sizeof(dnsheader
) + realname
.length())) {
291 memcpy(*response
+ sizeof(dnsheader
), realname
.c_str(), realname
.length());
295 if (ednsAdded
|| ecsAdded
) {
300 const std::string
responseStr(*response
, *responseLen
);
301 int res
= locateEDNSOptRR(responseStr
, &optStart
, &optLen
, &last
);
304 if (zeroScope
) { // this finds if an EDNS Client Subnet scope was set, and if it is 0
305 size_t optContentStart
= 0;
306 uint16_t optContentLen
= 0;
307 /* we need at least 4 bytes after the option length (family: 2, source prefix-length: 1, scope prefix-length: 1) */
308 if (isEDNSOptionInOpt(responseStr
, optStart
, optLen
, EDNSOptionCode::ECS
, &optContentStart
, &optContentLen
) && optContentLen
>= 4) {
309 /* see if the EDNS Client Subnet SCOPE PREFIX-LENGTH byte in position 3 is set to 0, which is the only thing
311 *zeroScope
= responseStr
.at(optContentStart
+ 3) == 0;
316 /* we added the entire OPT RR,
317 therefore we need to remove it entirely */
319 /* simply remove the last AR */
320 *responseLen
-= optLen
;
321 uint16_t arcount
= ntohs(dh
->arcount
);
323 dh
->arcount
= htons(arcount
);
326 /* Removing an intermediary RR could lead to compression error */
327 if (rewriteResponseWithoutEDNS(responseStr
, rewrittenResponse
) == 0) {
328 *responseLen
= rewrittenResponse
.size();
329 if (addRoom
&& (UINT16_MAX
- *responseLen
) > addRoom
) {
330 rewrittenResponse
.reserve(*responseLen
+ addRoom
);
332 *responseSize
= rewrittenResponse
.capacity();
333 *response
= reinterpret_cast<char*>(rewrittenResponse
.data());
336 warnlog("Error rewriting content");
341 /* the OPT RR was already present, but without ECS,
342 we need to remove the ECS option if any */
344 /* nothing after the OPT RR, we can simply remove the
346 size_t existingOptLen
= optLen
;
347 removeEDNSOptionFromOPT(*response
+ optStart
, &optLen
, EDNSOptionCode::ECS
);
348 *responseLen
-= (existingOptLen
- optLen
);
351 /* Removing an intermediary RR could lead to compression error */
352 if (rewriteResponseWithoutEDNSOption(responseStr
, EDNSOptionCode::ECS
, rewrittenResponse
) == 0) {
353 *responseLen
= rewrittenResponse
.size();
354 if (addRoom
&& (UINT16_MAX
- *responseLen
) > addRoom
) {
355 rewrittenResponse
.reserve(*responseLen
+ addRoom
);
357 *responseSize
= rewrittenResponse
.capacity();
358 *response
= reinterpret_cast<char*>(rewrittenResponse
.data());
361 warnlog("Error rewriting content");
372 static bool encryptResponse(char* response
, uint16_t* responseLen
, size_t responseSize
, bool tcp
, std::shared_ptr
<DNSCryptQuery
> dnsCryptQuery
, dnsheader
** dh
, dnsheader
* dhCopy
)
375 uint16_t encryptedResponseLen
= 0;
377 /* save the original header before encrypting it in place */
378 if (dh
!= nullptr && *dh
!= nullptr && dhCopy
!= nullptr) {
379 memcpy(dhCopy
, *dh
, sizeof(dnsheader
));
383 int res
= dnsCryptQuery
->encryptResponse(response
, *responseLen
, responseSize
, tcp
, &encryptedResponseLen
);
385 *responseLen
= encryptedResponseLen
;
387 /* dropping response */
388 vinfolog("Error encrypting the response, dropping.");
394 #endif /* HAVE_DNSCRYPT */
396 static bool applyRulesToResponse(LocalStateHolder
<vector
<DNSDistResponseRuleAction
> >& localRespRulactions
, DNSResponse
& dr
)
398 DNSResponseAction::Action action
=DNSResponseAction::Action::None
;
399 std::string ruleresult
;
400 for(const auto& lr
: *localRespRulactions
) {
401 if(lr
.d_rule
->matches(&dr
)) {
402 lr
.d_rule
->d_matches
++;
403 action
=(*lr
.d_action
)(&dr
, &ruleresult
);
405 case DNSResponseAction::Action::Allow
:
408 case DNSResponseAction::Action::Drop
:
411 case DNSResponseAction::Action::HeaderModify
:
414 case DNSResponseAction::Action::ServFail
:
415 dr
.dh
->rcode
= RCode::ServFail
;
418 /* non-terminal actions follow */
419 case DNSResponseAction::Action::Delay
:
420 dr
.delayMsec
= static_cast<int>(pdns_stou(ruleresult
)); // sorry
422 case DNSResponseAction::Action::None
:
431 bool processResponse(char** response
, uint16_t* responseLen
, size_t* responseSize
, LocalStateHolder
<vector
<DNSDistResponseRuleAction
> >& localRespRulactions
, DNSResponse
& dr
, size_t addRoom
, std::vector
<uint8_t>& rewrittenResponse
, bool muted
)
433 if (!applyRulesToResponse(localRespRulactions
, dr
)) {
437 bool zeroScope
= false;
438 if (!fixUpResponse(response
, responseLen
, responseSize
, *dr
.qname
, dr
.origFlags
, dr
.ednsAdded
, dr
.ecsAdded
, rewrittenResponse
, addRoom
, dr
.useZeroScope
? &zeroScope
: nullptr)) {
442 if (dr
.packetCache
&& !dr
.skipCache
&& *responseLen
<= s_maxPacketCacheEntrySize
) {
443 if (!dr
.useZeroScope
) {
444 /* if the query was not suitable for zero-scope, for
445 example because it had an existing ECS entry so the hash is
446 not really 'no ECS', so just insert it for the existing subnet
448 - we don't have the correct hash for a non-ECS query
449 - inserting with hash computed before the ECS replacement but with
450 the subnet extracted _after_ the replacement would not work.
454 // if zeroScope, pass the pre-ECS hash-key and do not pass the subnet to the cache
455 dr
.packetCache
->insert(zeroScope
? dr
.cacheKeyNoECS
: dr
.cacheKey
, zeroScope
? boost::none
: dr
.subnet
, dr
.origFlags
, dr
.dnssecOK
, *dr
.qname
, dr
.qtype
, dr
.qclass
, *response
, *responseLen
, dr
.tcp
, dr
.dh
->rcode
, dr
.tempFailureTTL
);
460 if (!encryptResponse(*response
, responseLen
, *responseSize
, dr
.tcp
, dr
.dnsCryptQuery
, nullptr, nullptr)) {
464 #endif /* HAVE_DNSCRYPT */
469 static bool sendUDPResponse(int origFD
, const char* response
, const uint16_t responseLen
, const int delayMsec
, const ComboAddress
& origDest
, const ComboAddress
& origRemote
)
471 if(delayMsec
&& g_delay
) {
472 DelayedPacket dp
{origFD
, string(response
,responseLen
), origRemote
, origDest
};
473 g_delay
->submit(dp
, delayMsec
);
477 if(origDest
.sin4
.sin_family
== 0) {
478 res
= sendto(origFD
, response
, responseLen
, 0, reinterpret_cast<const struct sockaddr
*>(&origRemote
), origRemote
.getSocklen());
481 res
= sendfromto(origFD
, response
, responseLen
, 0, origDest
, origRemote
);
485 vinfolog("Error sending response to %s: %s", origRemote
.toStringWithPort(), stringerror(err
));
493 int pickBackendSocketForSending(std::shared_ptr
<DownstreamState
>& state
)
495 return state
->sockets
[state
->socketsOffset
++ % state
->sockets
.size()];
498 static void pickBackendSocketsReadyForReceiving(const std::shared_ptr
<DownstreamState
>& state
, std::vector
<int>& ready
)
502 if (state
->sockets
.size() == 1) {
503 ready
.push_back(state
->sockets
[0]);
508 std::lock_guard
<std::mutex
> lock(state
->socketsLock
);
509 state
->mplexer
->getAvailableFDs(ready
, -1);
513 // listens on a dedicated socket, lobs answers from downstream servers to original requestors
514 void responderThread(std::shared_ptr
<DownstreamState
> dss
)
516 setThreadName("dnsdist/respond");
517 auto localRespRulactions
= g_resprulactions
.getLocal();
518 char packet
[s_maxPacketCacheEntrySize
+ DNSCRYPT_MAX_RESPONSE_PADDING_AND_MAC_SIZE
];
519 static_assert(sizeof(packet
) <= UINT16_MAX
, "Packet size should fit in a uint16_t");
520 /* when the answer is encrypted in place, we need to get a copy
521 of the original header before encryption to fill the ring buffer */
522 dnsheader cleartextDH
;
523 vector
<uint8_t> rewrittenResponse
;
525 uint16_t queryId
= 0;
526 std::vector
<int> sockets
;
527 sockets
.reserve(dss
->sockets
.size());
530 dnsheader
* dh
= reinterpret_cast<struct dnsheader
*>(packet
);
532 pickBackendSocketsReadyForReceiving(dss
, sockets
);
533 for (const auto& fd
: sockets
) {
534 ssize_t got
= recv(fd
, packet
, sizeof(packet
), 0);
535 char * response
= packet
;
536 size_t responseSize
= sizeof(packet
);
538 if (got
< 0 || static_cast<size_t>(got
) < sizeof(dnsheader
))
541 uint16_t responseLen
= static_cast<uint16_t>(got
);
544 if(queryId
>= dss
->idStates
.size()) {
548 IDState
* ids
= &dss
->idStates
[queryId
];
549 int64_t usageIndicator
= ids
->usageIndicator
;
551 if(!IDState::isInUse(usageIndicator
)) {
552 /* the corresponding state is marked as not in use, meaning that:
553 - it was already cleaned up by another thread and the state is gone ;
554 - we already got a response for this query and this one is a duplicate.
555 Either way, we don't touch it.
560 /* read the potential DOHUnit state as soon as possible, but don't use it
561 until we have confirmed that we own this state by updating usageIndicator */
563 /* setting age to 0 to prevent the maintainer thread from
564 cleaning this IDS while we process the response.
567 int origFD
= ids
->origFD
;
569 unsigned int consumed
= 0;
570 if (!responseContentMatches(response
, responseLen
, ids
->qname
, ids
->qtype
, ids
->qclass
, dss
->remote
, consumed
)) {
574 bool isDoH
= du
!= nullptr;
575 /* atomically mark the state as available, but only if it has not been altered
577 if (ids
->tryMarkUnused(usageIndicator
)) {
578 /* clear the potential DOHUnit asap, it's ours now
579 and since we just marked the state as unused,
580 someone could overwrite it. */
582 /* we only decrement the outstanding counter if the value was not
583 altered in the meantime, which would mean that the state has been actively reused
584 and the other thread has not incremented the outstanding counter, so we don't
585 want it to be decremented twice. */
586 --dss
->outstanding
; // you'd think an attacker could game this, but we're using connected socket
588 /* someone updated the state in the meantime, we can't touch the existing pointer */
590 /* since the state has been updated, we can't safely access it so let's just drop
595 if(dh
->tc
&& g_truncateTC
) {
596 truncateTC(response
, &responseLen
, responseSize
, consumed
);
599 dh
->id
= ids
->origID
;
601 uint16_t addRoom
= 0;
602 DNSResponse dr
= makeDNSResponseFromIDState(*ids
, dh
, sizeof(packet
), responseLen
, false);
603 if (dr
.dnsCryptQuery
) {
604 addRoom
= DNSCRYPT_MAX_RESPONSE_PADDING_AND_MAC_SIZE
;
607 memcpy(&cleartextDH
, dr
.dh
, sizeof(cleartextDH
));
608 if (!processResponse(&response
, &responseLen
, &responseSize
, localRespRulactions
, dr
, addRoom
, rewrittenResponse
, ids
->cs
&& ids
->cs
->muted
)) {
612 if (ids
->cs
&& !ids
->cs
->muted
) {
614 #ifdef HAVE_DNS_OVER_HTTPS
616 du
->response
= std::string(response
, responseLen
);
617 if (send(du
->rsock
, &du
, sizeof(du
), 0) != sizeof(du
)) {
618 /* at this point we have the only remaining pointer on this
619 DOHUnit object since we did set ids->du to nullptr earlier,
620 except if we got the response before the pointer could be
621 released by the frontend */
624 #endif /* HAVE_DNS_OVER_HTTPS */
629 empty
.sin4
.sin_family
= 0;
630 /* if ids->destHarvested is false, origDest holds the listening address.
631 We don't want to use that as a source since it could be 0.0.0.0 for example. */
632 sendUDPResponse(origFD
, response
, responseLen
, dr
.delayMsec
, ids
->destHarvested
? ids
->origDest
: empty
, ids
->origRemote
);
638 ++ids
->cs
->responses
;
642 double udiff
= ids
->sentTime
.udiff();
643 vinfolog("Got answer from %s, relayed to %s%s, took %f usec", dss
->remote
.toStringWithPort(), ids
->origRemote
.toStringWithPort(),
644 isDoH
? " (https)": "", udiff
);
648 g_rings
.insertResponse(ts
, *dr
.remote
, *dr
.qname
, dr
.qtype
, static_cast<unsigned int>(udiff
), static_cast<unsigned int>(got
), cleartextDH
, dss
->remote
);
650 switch (cleartextDH
.rcode
) {
651 case RCode::NXDomain
:
652 ++g_stats
.frontendNXDomain
;
654 case RCode::ServFail
:
655 ++g_stats
.servfailResponses
;
656 ++g_stats
.frontendServFail
;
659 ++g_stats
.frontendNoError
;
662 dss
->latencyUsec
= (127.0 * dss
->latencyUsec
/ 128.0) + udiff
/128.0;
664 doLatencyStats(udiff
);
666 rewrittenResponse
.clear();
669 catch(const std::exception
& e
){
670 vinfolog("Got an error in UDP responder thread while parsing a response from %s, id %d: %s", dss
->remote
.toStringWithPort(), queryId
, e
.what());
674 catch(const std::exception
& e
)
676 errlog("UDP responder thread died because of exception: %s", e
.what());
678 catch(const PDNSException
& e
)
680 errlog("UDP responder thread died because of PowerDNS exception: %s", e
.reason
);
684 errlog("UDP responder thread died because of an exception: %s", "unknown");
687 bool DownstreamState::reconnect()
689 std::unique_lock
<std::mutex
> tl(connectLock
, std::try_to_lock
);
690 if (!tl
.owns_lock()) {
691 /* we are already reconnecting */
696 for (auto& fd
: sockets
) {
698 if (sockets
.size() > 1) {
699 std::lock_guard
<std::mutex
> lock(socketsLock
);
700 mplexer
->removeReadFD(fd
);
702 /* shutdown() is needed to wake up recv() in the responderThread */
703 shutdown(fd
, SHUT_RDWR
);
707 if (!IsAnyAddress(remote
)) {
708 fd
= SSocket(remote
.sin4
.sin_family
, SOCK_DGRAM
, 0);
709 if (!IsAnyAddress(sourceAddr
)) {
710 SSetsockopt(fd
, SOL_SOCKET
, SO_REUSEADDR
, 1);
711 if (!sourceItfName
.empty()) {
712 #ifdef SO_BINDTODEVICE
713 int res
= setsockopt(fd
, SOL_SOCKET
, SO_BINDTODEVICE
, sourceItfName
.c_str(), sourceItfName
.length());
715 infolog("Error setting up the interface on backend socket '%s': %s", remote
.toStringWithPort(), stringerror());
720 SBind(fd
, sourceAddr
);
723 SConnect(fd
, remote
);
724 if (sockets
.size() > 1) {
725 std::lock_guard
<std::mutex
> lock(socketsLock
);
726 mplexer
->addReadFD(fd
, [](int, boost::any
) {});
730 catch(const std::runtime_error
& error
) {
731 infolog("Error connecting to new server with address %s: %s", remote
.toStringWithPort(), error
.what());
738 /* if at least one (re-)connection failed, close all sockets */
740 for (auto& fd
: sockets
) {
742 if (sockets
.size() > 1) {
743 std::lock_guard
<std::mutex
> lock(socketsLock
);
744 mplexer
->removeReadFD(fd
);
746 /* shutdown() is needed to wake up recv() in the responderThread */
747 shutdown(fd
, SHUT_RDWR
);
756 void DownstreamState::hash()
758 vinfolog("Computing hashes for id=%s and weight=%d", id
, weight
);
760 WriteLock
wl(&d_lock
);
763 std::string uuid
= boost::str(boost::format("%s-%d") % id
% w
);
764 unsigned int wshash
= burtleCI((const unsigned char*)uuid
.c_str(), uuid
.size(), g_hashperturb
);
765 hashes
.insert(wshash
);
770 void DownstreamState::setId(const boost::uuids::uuid
& newId
)
773 // compute hashes only if already done
774 if (!hashes
.empty()) {
779 void DownstreamState::setWeight(int newWeight
)
782 errlog("Error setting server's weight: downstream weight value must be greater than 0.");
786 if (!hashes
.empty()) {
791 DownstreamState::DownstreamState(const ComboAddress
& remote_
, const ComboAddress
& sourceAddr_
, unsigned int sourceItf_
, const std::string
& sourceItfName_
, size_t numberOfSockets
): sourceItfName(sourceItfName_
), remote(remote_
), sourceAddr(sourceAddr_
), sourceItf(sourceItf_
)
793 pthread_rwlock_init(&d_lock
, nullptr);
795 threadStarted
.clear();
797 mplexer
= std::unique_ptr
<FDMultiplexer
>(FDMultiplexer::getMultiplexerSilent());
799 sockets
.resize(numberOfSockets
);
800 for (auto& fd
: sockets
) {
804 if (!IsAnyAddress(remote
)) {
806 idStates
.resize(g_maxOutstanding
);
808 infolog("Added downstream server %s", remote
.toStringWithPort());
813 std::mutex g_luamutex
;
816 GlobalStateHolder
<ServerPolicy
> g_policy
;
818 shared_ptr
<DownstreamState
> firstAvailable(const NumberedServerVector
& servers
, const DNSQuestion
* dq
)
820 for(auto& d
: servers
) {
821 if(d
.second
->isUp() && d
.second
->qps
.check())
824 return leastOutstanding(servers
, dq
);
827 // get server with least outstanding queries, and within those, with the lowest order, and within those: the fastest
828 shared_ptr
<DownstreamState
> leastOutstanding(const NumberedServerVector
& servers
, const DNSQuestion
* dq
)
830 if (servers
.size() == 1 && servers
[0].second
->isUp()) {
831 return servers
[0].second
;
834 vector
<pair
<tuple
<int,int,double>, shared_ptr
<DownstreamState
>>> poss
;
835 /* so you might wonder, why do we go through this trouble? The data on which we sort could change during the sort,
836 which would suck royally and could even lead to crashes. So first we snapshot on what we sort, and then we sort */
837 poss
.reserve(servers
.size());
838 for(auto& d
: servers
) {
839 if(d
.second
->isUp()) {
840 poss
.push_back({make_tuple(d
.second
->outstanding
.load(), d
.second
->order
, d
.second
->latencyUsec
), d
.second
});
844 return shared_ptr
<DownstreamState
>();
845 nth_element(poss
.begin(), poss
.begin(), poss
.end(), [](const decltype(poss
)::value_type
& a
, const decltype(poss
)::value_type
& b
) { return a
.first
< b
.first
; });
846 return poss
.begin()->second
;
849 shared_ptr
<DownstreamState
> valrandom(unsigned int val
, const NumberedServerVector
& servers
, const DNSQuestion
* dq
)
851 vector
<pair
<int, shared_ptr
<DownstreamState
>>> poss
;
853 int max
= std::numeric_limits
<int>::max();
855 for(auto& d
: servers
) { // w=1, w=10 -> 1, 11
856 if(d
.second
->isUp()) {
857 // Don't overflow sum when adding high weights
858 if(d
.second
->weight
> max
- sum
) {
861 sum
+= d
.second
->weight
;
864 poss
.push_back({sum
, d
.second
});
868 // Catch poss & sum are empty to avoid SIGFPE
870 return shared_ptr
<DownstreamState
>();
873 auto p
= upper_bound(poss
.begin(), poss
.end(),r
, [](int r_
, const decltype(poss
)::value_type
& a
) { return r_
< a
.first
;});
875 return shared_ptr
<DownstreamState
>();
879 shared_ptr
<DownstreamState
> wrandom(const NumberedServerVector
& servers
, const DNSQuestion
* dq
)
881 return valrandom(random(), servers
, dq
);
884 uint32_t g_hashperturb
;
885 shared_ptr
<DownstreamState
> whashed(const NumberedServerVector
& servers
, const DNSQuestion
* dq
)
887 return valrandom(dq
->qname
->hash(g_hashperturb
), servers
, dq
);
890 shared_ptr
<DownstreamState
> chashed(const NumberedServerVector
& servers
, const DNSQuestion
* dq
)
892 unsigned int qhash
= dq
->qname
->hash(g_hashperturb
);
893 unsigned int sel
= std::numeric_limits
<unsigned int>::max();
894 unsigned int min
= std::numeric_limits
<unsigned int>::max();
895 shared_ptr
<DownstreamState
> ret
= nullptr, first
= nullptr;
897 for (const auto& d
: servers
) {
898 if (d
.second
->isUp()) {
899 // make sure hashes have been computed
900 if (d
.second
->hashes
.empty()) {
904 ReadLock
rl(&(d
.second
->d_lock
));
905 const auto& server
= d
.second
;
906 // we want to keep track of the last hash
907 if (min
> *(server
->hashes
.begin())) {
908 min
= *(server
->hashes
.begin());
912 auto hash_it
= server
->hashes
.lower_bound(qhash
);
913 if (hash_it
!= server
->hashes
.end()) {
914 if (*hash_it
< sel
) {
922 if (ret
!= nullptr) {
925 if (first
!= nullptr) {
928 return shared_ptr
<DownstreamState
>();
931 shared_ptr
<DownstreamState
> roundrobin(const NumberedServerVector
& servers
, const DNSQuestion
* dq
)
933 NumberedServerVector poss
;
935 for(auto& d
: servers
) {
936 if(d
.second
->isUp()) {
941 const auto *res
=&poss
;
942 if(poss
.empty() && !g_roundrobinFailOnNoServer
)
946 return shared_ptr
<DownstreamState
>();
948 static unsigned int counter
;
950 return (*res
)[(counter
++) % res
->size()].second
;
953 ComboAddress g_serverControl
{"127.0.0.1:5199"};
955 std::shared_ptr
<ServerPool
> createPoolIfNotExists(pools_t
& pools
, const string
& poolName
)
957 std::shared_ptr
<ServerPool
> pool
;
958 pools_t::iterator it
= pools
.find(poolName
);
959 if (it
!= pools
.end()) {
963 if (!poolName
.empty())
964 vinfolog("Creating pool %s", poolName
);
965 pool
= std::make_shared
<ServerPool
>();
966 pools
.insert(std::pair
<std::string
,std::shared_ptr
<ServerPool
> >(poolName
, pool
));
971 void setPoolPolicy(pools_t
& pools
, const string
& poolName
, std::shared_ptr
<ServerPolicy
> policy
)
973 std::shared_ptr
<ServerPool
> pool
= createPoolIfNotExists(pools
, poolName
);
974 if (!poolName
.empty()) {
975 vinfolog("Setting pool %s server selection policy to %s", poolName
, policy
->name
);
977 vinfolog("Setting default pool server selection policy to %s", policy
->name
);
979 pool
->policy
= policy
;
982 void addServerToPool(pools_t
& pools
, const string
& poolName
, std::shared_ptr
<DownstreamState
> server
)
984 std::shared_ptr
<ServerPool
> pool
= createPoolIfNotExists(pools
, poolName
);
985 if (!poolName
.empty()) {
986 vinfolog("Adding server to pool %s", poolName
);
988 vinfolog("Adding server to default pool");
990 pool
->addServer(server
);
993 void removeServerFromPool(pools_t
& pools
, const string
& poolName
, std::shared_ptr
<DownstreamState
> server
)
995 std::shared_ptr
<ServerPool
> pool
= getPool(pools
, poolName
);
997 if (!poolName
.empty()) {
998 vinfolog("Removing server from pool %s", poolName
);
1001 vinfolog("Removing server from default pool");
1004 pool
->removeServer(server
);
1007 std::shared_ptr
<ServerPool
> getPool(const pools_t
& pools
, const std::string
& poolName
)
1009 pools_t::const_iterator it
= pools
.find(poolName
);
1011 if (it
== pools
.end()) {
1012 throw std::out_of_range("No pool named " + poolName
);
1018 NumberedServerVector
getDownstreamCandidates(const pools_t
& pools
, const std::string
& poolName
)
1020 std::shared_ptr
<ServerPool
> pool
= getPool(pools
, poolName
);
1021 return pool
->getServers();
1024 static void spoofResponseFromString(DNSQuestion
& dq
, const string
& spoofContent
)
1028 std::vector
<std::string
> addrs
;
1029 stringtok(addrs
, spoofContent
, " ,");
1031 if (addrs
.size() == 1) {
1033 ComboAddress
spoofAddr(spoofContent
);
1034 SpoofAction
sa({spoofAddr
});
1037 catch(const PDNSException
&e
) {
1038 SpoofAction
sa(spoofContent
); // CNAME then
1042 std::vector
<ComboAddress
> cas
;
1043 for (const auto& addr
: addrs
) {
1045 cas
.push_back(ComboAddress(addr
));
1050 SpoofAction
sa(cas
);
1055 bool processRulesResult(const DNSAction::Action
& action
, DNSQuestion
& dq
, std::string
& ruleresult
, bool& drop
)
1058 case DNSAction::Action::Allow
:
1061 case DNSAction::Action::Drop
:
1066 case DNSAction::Action::Nxdomain
:
1067 dq
.dh
->rcode
= RCode::NXDomain
;
1069 ++g_stats
.ruleNXDomain
;
1072 case DNSAction::Action::Refused
:
1073 dq
.dh
->rcode
= RCode::Refused
;
1075 ++g_stats
.ruleRefused
;
1078 case DNSAction::Action::ServFail
:
1079 dq
.dh
->rcode
= RCode::ServFail
;
1081 ++g_stats
.ruleServFail
;
1084 case DNSAction::Action::Spoof
:
1085 spoofResponseFromString(dq
, ruleresult
);
1088 case DNSAction::Action::Truncate
:
1093 case DNSAction::Action::HeaderModify
:
1096 case DNSAction::Action::Pool
:
1097 dq
.poolname
=ruleresult
;
1100 case DNSAction::Action::NoRecurse
:
1104 /* non-terminal actions follow */
1105 case DNSAction::Action::Delay
:
1106 dq
.delayMsec
= static_cast<int>(pdns_stou(ruleresult
)); // sorry
1108 case DNSAction::Action::None
:
1110 case DNSAction::Action::NoOp
:
1114 /* false means that we don't stop the processing */
1119 static bool applyRulesToQuery(LocalHolders
& holders
, DNSQuestion
& dq
, const struct timespec
& now
)
1121 g_rings
.insertQuery(now
, *dq
.remote
, *dq
.qname
, dq
.qtype
, dq
.len
, *dq
.dh
);
1123 if(g_qcount
.enabled
) {
1124 string qname
= (*dq
.qname
).toLogString();
1125 bool countQuery
{true};
1126 if(g_qcount
.filter
) {
1127 std::lock_guard
<std::mutex
> lock(g_luamutex
);
1128 std::tie (countQuery
, qname
) = g_qcount
.filter(&dq
);
1132 WriteLock
wl(&g_qcount
.queryLock
);
1133 if(!g_qcount
.records
.count(qname
)) {
1134 g_qcount
.records
[qname
] = 0;
1136 g_qcount
.records
[qname
]++;
1140 if(auto got
= holders
.dynNMGBlock
->lookup(*dq
.remote
)) {
1141 auto updateBlockStats
= [&got
]() {
1142 ++g_stats
.dynBlocked
;
1143 got
->second
.blocks
++;
1146 if(now
< got
->second
.until
) {
1147 DNSAction::Action action
= got
->second
.action
;
1148 if (action
== DNSAction::Action::None
) {
1149 action
= g_dynBlockAction
;
1152 case DNSAction::Action::NoOp
:
1156 case DNSAction::Action::Nxdomain
:
1157 vinfolog("Query from %s turned into NXDomain because of dynamic block", dq
.remote
->toStringWithPort());
1160 dq
.dh
->rcode
= RCode::NXDomain
;
1164 case DNSAction::Action::Refused
:
1165 vinfolog("Query from %s refused because of dynamic block", dq
.remote
->toStringWithPort());
1168 dq
.dh
->rcode
= RCode::Refused
;
1172 case DNSAction::Action::Truncate
:
1175 vinfolog("Query from %s truncated because of dynamic block", dq
.remote
->toStringWithPort());
1181 vinfolog("Query from %s for %s over TCP *not* truncated because of dynamic block", dq
.remote
->toStringWithPort(), dq
.qname
->toLogString());
1184 case DNSAction::Action::NoRecurse
:
1186 vinfolog("Query from %s setting rd=0 because of dynamic block", dq
.remote
->toStringWithPort());
1191 vinfolog("Query from %s dropped because of dynamic block", dq
.remote
->toStringWithPort());
1197 if(auto got
= holders
.dynSMTBlock
->lookup(*dq
.qname
)) {
1198 auto updateBlockStats
= [&got
]() {
1199 ++g_stats
.dynBlocked
;
1203 if(now
< got
->until
) {
1204 DNSAction::Action action
= got
->action
;
1205 if (action
== DNSAction::Action::None
) {
1206 action
= g_dynBlockAction
;
1209 case DNSAction::Action::NoOp
:
1212 case DNSAction::Action::Nxdomain
:
1213 vinfolog("Query from %s for %s turned into NXDomain because of dynamic block", dq
.remote
->toStringWithPort(), dq
.qname
->toLogString());
1216 dq
.dh
->rcode
= RCode::NXDomain
;
1219 case DNSAction::Action::Refused
:
1220 vinfolog("Query from %s for %s refused because of dynamic block", dq
.remote
->toStringWithPort(), dq
.qname
->toLogString());
1223 dq
.dh
->rcode
= RCode::Refused
;
1226 case DNSAction::Action::Truncate
:
1230 vinfolog("Query from %s for %s truncated because of dynamic block", dq
.remote
->toStringWithPort(), dq
.qname
->toLogString());
1236 vinfolog("Query from %s for %s over TCP *not* truncated because of dynamic block", dq
.remote
->toStringWithPort(), dq
.qname
->toLogString());
1239 case DNSAction::Action::NoRecurse
:
1241 vinfolog("Query from %s setting rd=0 because of dynamic block", dq
.remote
->toStringWithPort());
1246 vinfolog("Query from %s for %s dropped because of dynamic block", dq
.remote
->toStringWithPort(), dq
.qname
->toLogString());
1252 DNSAction::Action action
=DNSAction::Action::None
;
1255 for(const auto& lr
: *holders
.rulactions
) {
1256 if(lr
.d_rule
->matches(&dq
)) {
1257 lr
.d_rule
->d_matches
++;
1258 action
=(*lr
.d_action
)(&dq
, &ruleresult
);
1259 if (processRulesResult(action
, dq
, ruleresult
, drop
)) {
1272 ssize_t
udpClientSendRequestToBackend(const std::shared_ptr
<DownstreamState
>& ss
, const int sd
, const char* request
, const size_t requestLen
, bool healthCheck
)
1276 if (ss
->sourceItf
== 0) {
1277 result
= send(sd
, request
, requestLen
, 0);
1282 cmsgbuf_aligned cbuf
;
1283 ComboAddress
remote(ss
->remote
);
1284 fillMSGHdr(&msgh
, &iov
, &cbuf
, sizeof(cbuf
), const_cast<char*>(request
), requestLen
, &remote
);
1285 addCMsgSrcAddr(&msgh
, &cbuf
, &ss
->sourceAddr
, ss
->sourceItf
);
1286 result
= sendmsg(sd
, &msgh
, 0);
1290 int savederrno
= errno
;
1291 vinfolog("Error sending request to backend %s: %d", ss
->remote
.toStringWithPort(), savederrno
);
1293 /* This might sound silly, but on Linux send() might fail with EINVAL
1294 if the interface the socket was bound to doesn't exist anymore.
1295 We don't want to reconnect the real socket if the healthcheck failed,
1296 because it's not using the same socket.
1298 if (!healthCheck
&& (savederrno
== EINVAL
|| savederrno
== ENODEV
)) {
1306 static bool isUDPQueryAcceptable(ClientState
& cs
, LocalHolders
& holders
, const struct msghdr
* msgh
, const ComboAddress
& remote
, ComboAddress
& dest
)
1308 if (msgh
->msg_flags
& MSG_TRUNC
) {
1309 /* message was too large for our buffer */
1310 vinfolog("Dropping message too large for our buffer");
1311 ++g_stats
.nonCompliantQueries
;
1315 if(!holders
.acl
->match(remote
)) {
1316 vinfolog("Query from %s dropped because of ACL", remote
.toStringWithPort());
1324 if (HarvestDestinationAddress(msgh
, &dest
)) {
1325 /* we don't get the port, only the address */
1326 dest
.sin4
.sin_port
= cs
.local
.sin4
.sin_port
;
1329 dest
.sin4
.sin_family
= 0;
1335 boost::optional
<std::vector
<uint8_t>> checkDNSCryptQuery(const ClientState
& cs
, const char* query
, uint16_t& len
, std::shared_ptr
<DNSCryptQuery
>& dnsCryptQuery
, time_t now
, bool tcp
)
1337 if (cs
.dnscryptCtx
) {
1338 #ifdef HAVE_DNSCRYPT
1339 vector
<uint8_t> response
;
1340 uint16_t decryptedQueryLen
= 0;
1342 dnsCryptQuery
= std::make_shared
<DNSCryptQuery
>(cs
.dnscryptCtx
);
1344 bool decrypted
= handleDNSCryptQuery(const_cast<char*>(query
), len
, dnsCryptQuery
, &decryptedQueryLen
, tcp
, now
, response
);
1347 if (response
.size() > 0) {
1350 throw std::runtime_error("Unable to decrypt DNSCrypt query, dropping.");
1353 len
= decryptedQueryLen
;
1354 #endif /* HAVE_DNSCRYPT */
1359 bool checkQueryHeaders(const struct dnsheader
* dh
)
1361 if (dh
->qr
) { // don't respond to responses
1362 ++g_stats
.nonCompliantQueries
;
1366 if (dh
->qdcount
== 0) {
1367 ++g_stats
.emptyQueries
;
1372 ++g_stats
.rdQueries
;
#if defined(HAVE_RECVMMSG) && defined(HAVE_SENDMMSG) && defined(MSG_WAITFORONE)
/* Fills one slot of the sendmmsg() output vector with a self-generated
   response, reusing the caller's iovec and control buffer. When no real
   destination was harvested (dest family is 0) we must not attach any
   source-address ancillary data. 'cs' is currently unused but kept for
   signature symmetry with processUDPQuery(). */
static void queueResponse(const ClientState& cs, const char* response, uint16_t responseLen, const ComboAddress& dest, const ComboAddress& remote, struct mmsghdr& outMsg, struct iovec* iov, cmsgbuf_aligned* cbuf)
{
  outMsg.msg_len = 0;
  fillMSGHdr(&outMsg.msg_hdr, iov, nullptr, 0, const_cast<char*>(response), responseLen, const_cast<ComboAddress*>(&remote));

  if (dest.sin4.sin_family == 0) {
    outMsg.msg_hdr.msg_control = nullptr;
  }
  else {
    addCMsgSrcAddr(&outMsg.msg_hdr, cbuf, &dest, 0);
  }
}
#endif /* defined(HAVE_RECVMMSG) && defined(HAVE_SENDMMSG) && defined(MSG_WAITFORONE) */
1393 /* self-generated responses or cache hits */
1394 static bool prepareOutgoingResponse(LocalHolders
& holders
, ClientState
& cs
, DNSQuestion
& dq
, bool cacheHit
)
1396 DNSResponse
dr(dq
.qname
, dq
.qtype
, dq
.qclass
, dq
.consumed
, dq
.local
, dq
.remote
, reinterpret_cast<dnsheader
*>(dq
.dh
), dq
.size
, dq
.len
, dq
.tcp
, dq
.queryTime
);
1398 #ifdef HAVE_PROTOBUF
1399 dr
.uniqueId
= dq
.uniqueId
;
1402 dr
.delayMsec
= dq
.delayMsec
;
1404 if (!applyRulesToResponse(cacheHit
? holders
.cacheHitRespRulactions
: holders
.selfAnsweredRespRulactions
, dr
)) {
1408 /* in case a rule changed it */
1409 dq
.delayMsec
= dr
.delayMsec
;
1411 #ifdef HAVE_DNSCRYPT
1413 if (!encryptResponse(reinterpret_cast<char*>(dq
.dh
), &dq
.len
, dq
.size
, dq
.tcp
, dq
.dnsCryptQuery
, nullptr, nullptr)) {
1417 #endif /* HAVE_DNSCRYPT */
1420 ++g_stats
.cacheHits
;
1423 switch (dr
.dh
->rcode
) {
1424 case RCode::NXDomain
:
1425 ++g_stats
.frontendNXDomain
;
1427 case RCode::ServFail
:
1428 ++g_stats
.frontendServFail
;
1430 case RCode::NoError
:
1431 ++g_stats
.frontendNoError
;
1435 doLatencyStats(0); // we're not going to measure this
1439 ProcessQueryResult
processQuery(DNSQuestion
& dq
, ClientState
& cs
, LocalHolders
& holders
, std::shared_ptr
<DownstreamState
>& selectedBackend
)
1441 const uint16_t queryId
= ntohs(dq
.dh
->id
);
1444 /* we need an accurate ("real") value for the response and
1445 to store into the IDS, but not for insertion into the
1446 rings for example */
1447 struct timespec now
;
1450 if (!applyRulesToQuery(holders
, dq
, now
)) {
1451 return ProcessQueryResult::Drop
;
1454 if(dq
.dh
->qr
) { // something turned it into a response
1455 fixUpQueryTurnedResponse(dq
, dq
.origFlags
);
1457 if (!prepareOutgoingResponse(holders
, cs
, dq
, false)) {
1458 return ProcessQueryResult::Drop
;
1461 ++g_stats
.selfAnswered
;
1463 return ProcessQueryResult::SendAnswer
;
1466 std::shared_ptr
<ServerPool
> serverPool
= getPool(*holders
.pools
, dq
.poolname
);
1467 dq
.packetCache
= serverPool
->packetCache
;
1468 auto policy
= *(holders
.policy
);
1469 if (serverPool
->policy
!= nullptr) {
1470 policy
= *(serverPool
->policy
);
1472 auto servers
= serverPool
->getServers();
1474 std::lock_guard
<std::mutex
> lock(g_luamutex
);
1475 selectedBackend
= policy
.policy(servers
, &dq
);
1478 selectedBackend
= policy
.policy(servers
, &dq
);
1481 uint16_t cachedResponseSize
= dq
.size
;
1482 uint32_t allowExpired
= selectedBackend
? 0 : g_staleCacheEntriesTTL
;
1484 if (dq
.packetCache
&& !dq
.skipCache
) {
1485 dq
.dnssecOK
= (getEDNSZ(dq
) & EDNS_HEADER_FLAG_DO
);
1488 if (dq
.useECS
&& ((selectedBackend
&& selectedBackend
->useECS
) || (!selectedBackend
&& serverPool
->getECS()))) {
1489 // we special case our cache in case a downstream explicitly gave us a universally valid response with a 0 scope
1490 if (dq
.packetCache
&& !dq
.skipCache
&& (!selectedBackend
|| !selectedBackend
->disableZeroScope
) && dq
.packetCache
->isECSParsingEnabled()) {
1491 if (dq
.packetCache
->get(dq
, dq
.consumed
, dq
.dh
->id
, reinterpret_cast<char*>(dq
.dh
), &cachedResponseSize
, &dq
.cacheKeyNoECS
, dq
.subnet
, dq
.dnssecOK
, allowExpired
)) {
1492 dq
.len
= cachedResponseSize
;
1494 if (!prepareOutgoingResponse(holders
, cs
, dq
, true)) {
1495 return ProcessQueryResult::Drop
;
1498 return ProcessQueryResult::SendAnswer
;
1502 /* there was no existing ECS on the query, enable the zero-scope feature */
1503 dq
.useZeroScope
= true;
1507 if (!handleEDNSClientSubnet(dq
, &(dq
.ednsAdded
), &(dq
.ecsAdded
), g_preserveTrailingData
)) {
1508 vinfolog("Dropping query from %s because we couldn't insert the ECS value", dq
.remote
->toStringWithPort());
1509 return ProcessQueryResult::Drop
;
1513 if (dq
.packetCache
&& !dq
.skipCache
) {
1514 if (dq
.packetCache
->get(dq
, dq
.consumed
, dq
.dh
->id
, reinterpret_cast<char*>(dq
.dh
), &cachedResponseSize
, &dq
.cacheKey
, dq
.subnet
, dq
.dnssecOK
, allowExpired
)) {
1515 dq
.len
= cachedResponseSize
;
1517 if (!prepareOutgoingResponse(holders
, cs
, dq
, true)) {
1518 return ProcessQueryResult::Drop
;
1521 return ProcessQueryResult::SendAnswer
;
1523 ++g_stats
.cacheMisses
;
1526 if(!selectedBackend
) {
1529 vinfolog("%s query for %s|%s from %s, no policy applied", g_servFailOnNoPolicy
? "ServFailed" : "Dropped", dq
.qname
->toLogString(), QType(dq
.qtype
).getName(), dq
.remote
->toStringWithPort());
1530 if (g_servFailOnNoPolicy
) {
1531 restoreFlags(dq
.dh
, dq
.origFlags
);
1533 dq
.dh
->rcode
= RCode::ServFail
;
1536 if (!prepareOutgoingResponse(holders
, cs
, dq
, false)) {
1537 return ProcessQueryResult::Drop
;
1539 // no response-only statistics counter to update.
1540 return ProcessQueryResult::SendAnswer
;
1543 return ProcessQueryResult::Drop
;
1546 if (dq
.addXPF
&& selectedBackend
->xpfRRCode
!= 0) {
1547 addXPF(dq
, selectedBackend
->xpfRRCode
, g_preserveTrailingData
);
1550 selectedBackend
->queries
++;
1551 return ProcessQueryResult::PassToBackend
;
1553 catch(const std::exception
& e
){
1554 vinfolog("Got an error while parsing a %s query from %s, id %d: %s", (dq
.tcp
? "TCP" : "UDP"), dq
.remote
->toStringWithPort(), queryId
, e
.what());
1556 return ProcessQueryResult::Drop
;
1559 static void processUDPQuery(ClientState
& cs
, LocalHolders
& holders
, const struct msghdr
* msgh
, const ComboAddress
& remote
, ComboAddress
& dest
, char* query
, uint16_t len
, size_t queryBufferSize
, struct mmsghdr
* responsesVect
, unsigned int* queuedResponses
, struct iovec
* respIOV
, cmsgbuf_aligned
* respCBuf
)
1561 assert(responsesVect
== nullptr || (queuedResponses
!= nullptr && respIOV
!= nullptr && respCBuf
!= nullptr));
1562 uint16_t queryId
= 0;
1565 if (!isUDPQueryAcceptable(cs
, holders
, msgh
, remote
, dest
)) {
1569 /* we need an accurate ("real") value for the response and
1570 to store into the IDS, but not for insertion into the
1571 rings for example */
1572 struct timespec queryRealTime
;
1573 gettime(&queryRealTime
, true);
1575 std::shared_ptr
<DNSCryptQuery
> dnsCryptQuery
= nullptr;
1576 auto dnsCryptResponse
= checkDNSCryptQuery(cs
, query
, len
, dnsCryptQuery
, queryRealTime
.tv_sec
, false);
1577 if (dnsCryptResponse
) {
1578 sendUDPResponse(cs
.udpFD
, reinterpret_cast<char*>(dnsCryptResponse
->data()), static_cast<uint16_t>(dnsCryptResponse
->size()), 0, dest
, remote
);
1582 struct dnsheader
* dh
= reinterpret_cast<struct dnsheader
*>(query
);
1583 queryId
= ntohs(dh
->id
);
1585 if (!checkQueryHeaders(dh
)) {
1589 uint16_t qtype
, qclass
;
1590 unsigned int consumed
= 0;
1591 DNSName
qname(query
, len
, sizeof(dnsheader
), false, &qtype
, &qclass
, &consumed
);
1592 DNSQuestion
dq(&qname
, qtype
, qclass
, consumed
, dest
.sin4
.sin_family
!= 0 ? &dest
: &cs
.local
, &remote
, dh
, queryBufferSize
, len
, false, &queryRealTime
);
1593 dq
.dnsCryptQuery
= std::move(dnsCryptQuery
);
1594 std::shared_ptr
<DownstreamState
> ss
{nullptr};
1595 auto result
= processQuery(dq
, cs
, holders
, ss
);
1597 if (result
== ProcessQueryResult::Drop
) {
1601 if (result
== ProcessQueryResult::SendAnswer
) {
1602 #if defined(HAVE_RECVMMSG) && defined(HAVE_SENDMMSG) && defined(MSG_WAITFORONE)
1603 if (dq
.delayMsec
== 0 && responsesVect
!= nullptr) {
1604 queueResponse(cs
, reinterpret_cast<char*>(dq
.dh
), dq
.len
, *dq
.local
, *dq
.remote
, responsesVect
[*queuedResponses
], respIOV
, respCBuf
);
1605 (*queuedResponses
)++;
1608 #endif /* defined(HAVE_RECVMMSG) && defined(HAVE_SENDMMSG) && defined(MSG_WAITFORONE) */
1609 /* we use dest, always, because we don't want to use the listening address to send a response since it could be 0.0.0.0 */
1610 sendUDPResponse(cs
.udpFD
, reinterpret_cast<char*>(dq
.dh
), dq
.len
, dq
.delayMsec
, dest
, *dq
.remote
);
1614 if (result
!= ProcessQueryResult::PassToBackend
|| ss
== nullptr) {
1618 unsigned int idOffset
= (ss
->idOffset
++) % ss
->idStates
.size();
1619 IDState
* ids
= &ss
->idStates
[idOffset
];
1621 DOHUnit
* du
= nullptr;
1623 /* that means that the state was in use, possibly with an allocated
1624 DOHUnit that we will need to handle, but we can't touch it before
1625 confirming that we now own this state */
1626 if (ids
->isInUse()) {
1630 /* we atomically replace the value, we now own this state */
1631 if (!ids
->markAsUsed()) {
1632 /* the state was not in use.
1633 we reset 'du' because it might have still been in use when we read it. */
1638 /* we are reusing a state, no change in outstanding but if there was an existing DOHUnit we need
1639 to handle it because it's about to be overwritten. */
1642 ++g_stats
.downstreamTimeouts
;
1643 handleDOHTimeout(du
);
1647 ids
->origFD
= cs
.udpFD
;
1648 ids
->origID
= dh
->id
;
1649 setIDStateFromDNSQuestion(*ids
, dq
, std::move(qname
));
1651 /* If we couldn't harvest the real dest addr, still
1652 write down the listening addr since it will be useful
1653 (especially if it's not an 'any' one).
1654 We need to keep track of which one it is since we may
1655 want to use the real but not the listening addr to reply.
1657 if (dest
.sin4
.sin_family
!= 0) {
1658 ids
->origDest
= dest
;
1659 ids
->destHarvested
= true;
1662 ids
->origDest
= cs
.local
;
1663 ids
->destHarvested
= false;
1668 int fd
= pickBackendSocketForSending(ss
);
1669 ssize_t ret
= udpClientSendRequestToBackend(ss
, fd
, query
, dq
.len
);
1673 ++g_stats
.downstreamSendErrors
;
1676 vinfolog("Got query for %s|%s from %s, relayed to %s", ids
->qname
.toLogString(), QType(ids
->qtype
).getName(), remote
.toStringWithPort(), ss
->getName());
1678 catch(const std::exception
& e
){
1679 vinfolog("Got an error in UDP question thread while parsing a query from %s, id %d: %s", remote
.toStringWithPort(), queryId
, e
.what());
#if defined(HAVE_RECVMMSG) && defined(HAVE_SENDMMSG) && defined(MSG_WAITFORONE)
/* UDP receiver loop using recvmmsg()/sendmmsg() to amortize syscall costs:
   receives up to g_udpVectorSize datagrams at once, processes each, and
   batches the immediate (self-generated, delay-free) responses back out.
   NOTE(review): reconstructed from a lossy extraction — confirm against upstream. */
static void MultipleMessagesUDPClientThread(ClientState* cs, LocalHolders& holders)
{
  struct MMReceiver
  {
    char packet[s_maxPacketCacheEntrySize];
    ComboAddress remote;
    ComboAddress dest;
    struct iovec iov;
    /* used by HarvestDestinationAddress */
    cmsgbuf_aligned cbuf;
  };
  const size_t vectSize = g_udpVectorSize;
  /* the actual buffer is larger because:
     - we may have to add EDNS and/or ECS
     - we use it for self-generated responses (from rule or cache)
     but we only accept incoming payloads up to that size
  */
  static_assert(s_udpIncomingBufferSize <= sizeof(MMReceiver::packet), "the incoming buffer size should not be larger than sizeof(MMReceiver::packet)");

  auto recvData = std::unique_ptr<MMReceiver[]>(new MMReceiver[vectSize]);
  auto msgVec = std::unique_ptr<struct mmsghdr[]>(new struct mmsghdr[vectSize]);
  auto outMsgVec = std::unique_ptr<struct mmsghdr[]>(new struct mmsghdr[vectSize]);

  /* initialize the structures needed to receive our messages */
  for (size_t idx = 0; idx < vectSize; idx++) {
    recvData[idx].remote.sin4.sin_family = cs->local.sin4.sin_family;
    fillMSGHdr(&msgVec[idx].msg_hdr, &recvData[idx].iov, &recvData[idx].cbuf, sizeof(recvData[idx].cbuf), recvData[idx].packet, s_udpIncomingBufferSize, &recvData[idx].remote);
  }

  /* go now */
  for (;;) {

    /* reset the IO vector, since it's also used to send the vector of responses
       to avoid having to copy the data around */
    for (size_t idx = 0; idx < vectSize; idx++) {
      recvData[idx].iov.iov_base = recvData[idx].packet;
      recvData[idx].iov.iov_len = sizeof(recvData[idx].packet);
    }

    /* block until we have at least one message ready, but return
       as many as possible to save the syscall costs */
    int msgsGot = recvmmsg(cs->udpFD, msgVec.get(), vectSize, MSG_WAITFORONE | MSG_TRUNC, nullptr);

    if (msgsGot <= 0) {
      vinfolog("Getting UDP messages via recvmmsg() failed with: %s", stringerror());
      continue;
    }

    unsigned int msgsToSend = 0;

    /* process the received messages */
    for (int msgIdx = 0; msgIdx < msgsGot; msgIdx++) {
      const struct msghdr* msgh = &msgVec[msgIdx].msg_hdr;
      unsigned int got = msgVec[msgIdx].msg_len;
      const ComboAddress& remote = recvData[msgIdx].remote;

      if (static_cast<size_t>(got) < sizeof(struct dnsheader)) {
        ++g_stats.nonCompliantQueries;
        continue;
      }

      processUDPQuery(*cs, holders, msgh, remote, recvData[msgIdx].dest, recvData[msgIdx].packet, static_cast<uint16_t>(got), sizeof(recvData[msgIdx].packet), outMsgVec.get(), &msgsToSend, &recvData[msgIdx].iov, &recvData[msgIdx].cbuf);
    }

    /* immediate (not delayed or sent to a backend) responses (mostly from a rule, dynamic block
       or the cache) can be sent in batch too */

    if (msgsToSend > 0 && msgsToSend <= static_cast<unsigned int>(msgsGot)) {
      int sent = sendmmsg(cs->udpFD, outMsgVec.get(), msgsToSend, 0);

      if (sent < 0 || static_cast<unsigned int>(sent) != msgsToSend) {
        vinfolog("Error sending responses with sendmmsg() (%d on %u): %s", sent, msgsToSend, stringerror());
      }
    }

  }
}
#endif /* defined(HAVE_RECVMMSG) && defined(HAVE_SENDMMSG) && defined(MSG_WAITFORONE) */
1764 // listens to incoming queries, sends out to downstream servers, noting the intended return path
1765 static void udpClientThread(ClientState
* cs
)
1768 setThreadName("dnsdist/udpClie");
1769 LocalHolders holders
;
1771 #if defined(HAVE_RECVMMSG) && defined(HAVE_SENDMMSG) && defined(MSG_WAITFORONE)
1772 if (g_udpVectorSize
> 1) {
1773 MultipleMessagesUDPClientThread(cs
, holders
);
1777 #endif /* defined(HAVE_RECVMMSG) && defined(HAVE_SENDMMSG) && defined(MSG_WAITFORONE) */
1779 char packet
[s_maxPacketCacheEntrySize
];
1780 /* the actual buffer is larger because:
1781 - we may have to add EDNS and/or ECS
1782 - we use it for self-generated responses (from rule or cache)
1783 but we only accept incoming payloads up to that size
1785 static_assert(s_udpIncomingBufferSize
<= sizeof(packet
), "the incoming buffer size should not be larger than sizeof(MMReceiver::packet)");
1788 /* used by HarvestDestinationAddress */
1789 cmsgbuf_aligned cbuf
;
1791 ComboAddress remote
;
1793 remote
.sin4
.sin_family
= cs
->local
.sin4
.sin_family
;
1794 fillMSGHdr(&msgh
, &iov
, &cbuf
, sizeof(cbuf
), packet
, s_udpIncomingBufferSize
, &remote
);
1797 ssize_t got
= recvmsg(cs
->udpFD
, &msgh
, 0);
1799 if (got
< 0 || static_cast<size_t>(got
) < sizeof(struct dnsheader
)) {
1800 ++g_stats
.nonCompliantQueries
;
1804 processUDPQuery(*cs
, holders
, &msgh
, remote
, dest
, packet
, static_cast<uint16_t>(got
), sizeof(packet
), nullptr, nullptr, nullptr, nullptr);
1808 catch(const std::exception
&e
)
1810 errlog("UDP client thread died because of exception: %s", e
.what());
1812 catch(const PDNSException
&e
)
1814 errlog("UDP client thread died because of PowerDNS exception: %s", e
.reason
);
1818 errlog("UDP client thread died because of an exception: %s", "unknown");
/* Returns a 16-bit DNS transaction id, preferring libsodium's CSPRNG
   when available and falling back to libc random() otherwise.
   NOTE(review): the random() fallback is not seeded here and is
   predictable; consider a stronger source since DNS ids resist spoofing
   — confirm whether another part of the program seeds it. */
uint16_t getRandomDNSID()
{
#ifdef HAVE_LIBSODIUM
  return static_cast<uint16_t>(randombytes_random() & 0xFFFF);
#else
  return static_cast<uint16_t>(random() & 0xFFFF);
#endif
}
1830 static bool upCheck(const shared_ptr
<DownstreamState
>& ds
)
1833 DNSName checkName
= ds
->checkName
;
1834 uint16_t checkType
= ds
->checkType
.getCode();
1835 uint16_t checkClass
= ds
->checkClass
;
1836 dnsheader checkHeader
;
1837 memset(&checkHeader
, 0, sizeof(checkHeader
));
1839 checkHeader
.qdcount
= htons(1);
1840 checkHeader
.id
= getRandomDNSID();
1842 checkHeader
.rd
= true;
1844 checkHeader
.cd
= true;
1847 if (ds
->checkFunction
) {
1848 std::lock_guard
<std::mutex
> lock(g_luamutex
);
1849 auto ret
= ds
->checkFunction(checkName
, checkType
, checkClass
, &checkHeader
);
1850 checkName
= std::get
<0>(ret
);
1851 checkType
= std::get
<1>(ret
);
1852 checkClass
= std::get
<2>(ret
);
1855 vector
<uint8_t> packet
;
1856 DNSPacketWriter
dpw(packet
, checkName
, checkType
, checkClass
);
1857 dnsheader
* requestHeader
= dpw
.getHeader();
1858 *requestHeader
= checkHeader
;
1860 Socket
sock(ds
->remote
.sin4
.sin_family
, SOCK_DGRAM
);
1861 sock
.setNonBlocking();
1862 if (!IsAnyAddress(ds
->sourceAddr
)) {
1863 sock
.setReuseAddr();
1864 if (!ds
->sourceItfName
.empty()) {
1865 #ifdef SO_BINDTODEVICE
1866 int res
= setsockopt(sock
.getHandle(), SOL_SOCKET
, SO_BINDTODEVICE
, ds
->sourceItfName
.c_str(), ds
->sourceItfName
.length());
1867 if (res
!= 0 && g_verboseHealthChecks
) {
1868 infolog("Error setting SO_BINDTODEVICE on the health check socket for backend '%s': %s", ds
->getNameWithAddr(), stringerror());
1872 sock
.bind(ds
->sourceAddr
);
1874 sock
.connect(ds
->remote
);
1875 ssize_t sent
= udpClientSendRequestToBackend(ds
, sock
.getHandle(), reinterpret_cast<char*>(&packet
[0]), packet
.size(), true);
1878 if (g_verboseHealthChecks
)
1879 infolog("Error while sending a health check query to backend %s: %d", ds
->getNameWithAddr(), ret
);
1883 int ret
= waitForRWData(sock
.getHandle(), true, /* ms to seconds */ ds
->checkTimeout
/ 1000, /* remaining ms to us */ (ds
->checkTimeout
% 1000) * 1000);
1884 if(ret
< 0 || !ret
) { // error, timeout, both are down!
1887 if (g_verboseHealthChecks
)
1888 infolog("Error while waiting for the health check response from backend %s: %d", ds
->getNameWithAddr(), ret
);
1891 if (g_verboseHealthChecks
)
1892 infolog("Timeout while waiting for the health check response from backend %s", ds
->getNameWithAddr());
1899 sock
.recvFrom(reply
, from
);
1901 /* we are using a connected socket but hey.. */
1902 if (from
!= ds
->remote
) {
1903 if (g_verboseHealthChecks
)
1904 infolog("Invalid health check response received from %s, expecting one from %s", from
.toStringWithPort(), ds
->remote
.toStringWithPort());
1908 const dnsheader
* responseHeader
= reinterpret_cast<const dnsheader
*>(reply
.c_str());
1910 if (reply
.size() < sizeof(*responseHeader
)) {
1911 if (g_verboseHealthChecks
)
1912 infolog("Invalid health check response of size %d from backend %s, expecting at least %d", reply
.size(), ds
->getNameWithAddr(), sizeof(*responseHeader
));
1916 if (responseHeader
->id
!= requestHeader
->id
) {
1917 if (g_verboseHealthChecks
)
1918 infolog("Invalid health check response id %d from backend %s, expecting %d", responseHeader
->id
, ds
->getNameWithAddr(), requestHeader
->id
);
1922 if (!responseHeader
->qr
) {
1923 if (g_verboseHealthChecks
)
1924 infolog("Invalid health check response from backend %s, expecting QR to be set", ds
->getNameWithAddr());
1928 if (responseHeader
->rcode
== RCode::ServFail
) {
1929 if (g_verboseHealthChecks
)
1930 infolog("Backend %s responded to health check with ServFail", ds
->getNameWithAddr());
1934 if (ds
->mustResolve
&& (responseHeader
->rcode
== RCode::NXDomain
|| responseHeader
->rcode
== RCode::Refused
)) {
1935 if (g_verboseHealthChecks
)
1936 infolog("Backend %s responded to health check with %s while mustResolve is set", ds
->getNameWithAddr(), responseHeader
->rcode
== RCode::NXDomain
? "NXDomain" : "Refused");
1940 uint16_t receivedType
;
1941 uint16_t receivedClass
;
1942 DNSName
receivedName(reply
.c_str(), reply
.size(), sizeof(dnsheader
), false, &receivedType
, &receivedClass
);
1944 if (receivedName
!= checkName
|| receivedType
!= checkType
|| receivedClass
!= checkClass
) {
1945 if (g_verboseHealthChecks
)
1946 infolog("Backend %s responded to health check with an invalid qname (%s vs %s), qtype (%s vs %s) or qclass (%d vs %d)", ds
->getNameWithAddr(), receivedName
.toLogString(), checkName
.toLogString(), QType(receivedType
).getName(), QType(checkType
).getName(), receivedClass
, checkClass
);
1952 catch(const std::exception
& e
)
1954 if (g_verboseHealthChecks
)
1955 infolog("Error checking the health of backend %s: %s", ds
->getNameWithAddr(), e
.what());
1960 if (g_verboseHealthChecks
)
1961 infolog("Unknown exception while checking the health of backend %s", ds
->getNameWithAddr());
/* upper bound on the number of TCP client worker threads */
uint64_t g_maxTCPClientThreads{10};
/* seconds between packet-cache expired-entries cleanups */
std::atomic<uint16_t> g_cacheCleaningDelay{60};
/* percentage of cache entries to keep when purging expired ones */
std::atomic<uint16_t> g_cacheCleaningPercentage{100};
1971 setThreadName("dnsdist/main");
1974 int32_t secondsToWaitLog
= 0;
1980 std::lock_guard
<std::mutex
> lock(g_luamutex
);
1981 auto f
= g_lua
.readVariable
<boost::optional
<std::function
<void()> > >("maintenance");
1985 secondsToWaitLog
= 0;
1987 catch(std::exception
&e
) {
1988 if (secondsToWaitLog
<= 0) {
1989 infolog("Error during execution of maintenance function: %s", e
.what());
1990 secondsToWaitLog
= 61;
1992 secondsToWaitLog
-= interval
;
1998 if (counter
>= g_cacheCleaningDelay
) {
1999 /* keep track, for each cache, of whether we should keep
2001 std::map
<std::shared_ptr
<DNSDistPacketCache
>, bool> caches
;
2003 /* gather all caches actually used by at least one pool, and see
2004 if something prevents us from cleaning the expired entries */
2005 auto localPools
= g_pools
.getLocal();
2006 for (const auto& entry
: *localPools
) {
2007 auto& pool
= entry
.second
;
2009 auto packetCache
= pool
->packetCache
;
2014 auto pair
= caches
.insert({packetCache
, false});
2015 auto& iter
= pair
.first
;
2016 /* if we need to keep stale data for this cache (ie, not clear
2017 expired entries when at least one pool using this cache
2018 has all its backends down) */
2019 if (packetCache
->keepStaleData() && iter
->second
== false) {
2020 /* so far all pools had at least one backend up */
2021 if (pool
->countServers(true) == 0) {
2022 iter
->second
= true;
2027 for (auto pair
: caches
) {
2028 /* shall we keep expired entries ? */
2029 if (pair
.second
== true) {
2032 auto& packetCache
= pair
.first
;
2033 size_t upTo
= (packetCache
->getMaxEntries()* (100 - g_cacheCleaningPercentage
)) / 100;
2034 packetCache
->purgeExpired(upTo
);
2039 // ponder pruning g_dynblocks of expired entries here
2043 static void secPollThread()
2045 setThreadName("dnsdist/secpoll");
2049 doSecPoll(g_secPollSuffix
);
2053 sleep(g_secPollInterval
);
2057 static void healthChecksThread()
2059 setThreadName("dnsdist/healthC");
2066 if(g_tcpclientthreads
->getQueuedCount() > 1 && !g_tcpclientthreads
->hasReachedMaxThreads())
2067 g_tcpclientthreads
->addTCPClientThread();
2069 auto states
= g_dstates
.getLocal(); // this points to the actual shared_ptrs!
2070 for(auto& dss
: *states
) {
2071 if(++dss
->lastCheck
< dss
->checkInterval
)
2074 if(dss
->availability
==DownstreamState::Availability::Auto
) {
2075 bool newState
=upCheck(dss
);
2077 /* check succeeded */
2078 dss
->currentCheckFailures
= 0;
2080 if (!dss
->upStatus
) {
2081 /* we were marked as down */
2082 dss
->consecutiveSuccessfulChecks
++;
2083 if (dss
->consecutiveSuccessfulChecks
< dss
->minRiseSuccesses
) {
2084 /* if we need more than one successful check to rise
2085 and we didn't reach the threshold yet,
2093 dss
->consecutiveSuccessfulChecks
= 0;
2095 if (dss
->upStatus
) {
2096 /* we are currently up */
2097 dss
->currentCheckFailures
++;
2098 if (dss
->currentCheckFailures
< dss
->maxCheckFailures
) {
2099 /* we need more than one failure to be marked as down,
2100 and we did not reach the threshold yet, let's stay down */
2106 if(newState
!= dss
->upStatus
) {
2107 warnlog("Marking downstream %s as '%s'", dss
->getNameWithAddr(), newState
? "up" : "down");
2109 if (newState
&& !dss
->connected
) {
2110 newState
= dss
->reconnect();
2112 if (dss
->connected
&& !dss
->threadStarted
.test_and_set()) {
2113 dss
->tid
= thread(responderThread
, dss
);
2117 dss
->upStatus
= newState
;
2118 dss
->currentCheckFailures
= 0;
2119 dss
->consecutiveSuccessfulChecks
= 0;
2120 if (g_snmpAgent
&& g_snmpTrapsEnabled
) {
2121 g_snmpAgent
->sendBackendStatusChangeTrap(dss
);
2126 auto delta
= dss
->sw
.udiffAndSet()/1000000.0;
2127 dss
->queryLoad
= 1.0*(dss
->queries
.load() - dss
->prev
.queries
.load())/delta
;
2128 dss
->dropRate
= 1.0*(dss
->reuseds
.load() - dss
->prev
.reuseds
.load())/delta
;
2129 dss
->prev
.queries
.store(dss
->queries
.load());
2130 dss
->prev
.reuseds
.store(dss
->reuseds
.load());
2132 for(IDState
& ids
: dss
->idStates
) { // timeouts
2133 int64_t usageIndicator
= ids
.usageIndicator
;
2134 if(IDState::isInUse(usageIndicator
) && ids
.age
++ > g_udpTimeout
) {
2135 /* We mark the state as unused as soon as possible
2136 to limit the risk of racing with the
2139 auto oldDU
= ids
.du
;
2141 if (!ids
.tryMarkUnused(usageIndicator
)) {
2142 /* this state has been altered in the meantime,
2143 don't go anywhere near it */
2147 handleDOHTimeout(oldDU
);
2151 ++g_stats
.downstreamTimeouts
; // this is an 'actively' discovered timeout
2152 vinfolog("Had a downstream timeout from %s (%s) for query for %s|%s from %s",
2153 dss
->remote
.toStringWithPort(), dss
->name
,
2154 ids
.qname
.toLogString(), QType(ids
.qtype
).getName(), ids
.origRemote
.toStringWithPort());
2159 struct dnsheader fake
;
2160 memset(&fake
, 0, sizeof(fake
));
2161 fake
.id
= ids
.origID
;
2163 g_rings
.insertResponse(ts
, ids
.origRemote
, ids
.qname
, ids
.qtype
, std::numeric_limits
<unsigned int>::max(), 0, fake
, dss
->remote
);
2170 static void bindAny(int af
, int sock
)
2172 __attribute__((unused
)) int one
= 1;
2175 if (setsockopt(sock
, IPPROTO_IP
, IP_FREEBIND
, &one
, sizeof(one
)) < 0)
2176 warnlog("Warning: IP_FREEBIND setsockopt failed: %s", stringerror());
2181 if (setsockopt(sock
, IPPROTO_IP
, IP_BINDANY
, &one
, sizeof(one
)) < 0)
2182 warnlog("Warning: IP_BINDANY setsockopt failed: %s", stringerror());
2186 if (setsockopt(sock
, IPPROTO_IPV6
, IPV6_BINDANY
, &one
, sizeof(one
)) < 0)
2187 warnlog("Warning: IPV6_BINDANY setsockopt failed: %s", stringerror());
2190 if (setsockopt(sock
, SOL_SOCKET
, SO_BINDANY
, &one
, sizeof(one
)) < 0)
2191 warnlog("Warning: SO_BINDANY setsockopt failed: %s", stringerror());
2195 static void dropGroupPrivs(gid_t gid
)
2198 if (setgid(gid
) == 0) {
2199 if (setgroups(0, NULL
) < 0) {
2200 warnlog("Warning: Unable to drop supplementary gids: %s", stringerror());
2204 warnlog("Warning: Unable to set group ID to %d: %s", gid
, stringerror());
2209 static void dropUserPrivs(uid_t uid
)
2212 if(setuid(uid
) < 0) {
2213 warnlog("Warning: Unable to set user ID to %d: %s", uid
, stringerror());
2218 static void checkFileDescriptorsLimits(size_t udpBindsCount
, size_t tcpBindsCount
)
2220 /* stdin, stdout, stderr */
2221 size_t requiredFDsCount
= 3;
2222 auto backends
= g_dstates
.getLocal();
2223 /* UDP sockets to backends */
2224 size_t backendUDPSocketsCount
= 0;
2225 for (const auto& backend
: *backends
) {
2226 backendUDPSocketsCount
+= backend
->sockets
.size();
2228 requiredFDsCount
+= backendUDPSocketsCount
;
2229 /* TCP sockets to backends */
2230 requiredFDsCount
+= (backends
->size() * g_maxTCPClientThreads
);
2231 /* listening sockets */
2232 requiredFDsCount
+= udpBindsCount
;
2233 requiredFDsCount
+= tcpBindsCount
;
2234 /* max TCP connections currently served */
2235 requiredFDsCount
+= g_maxTCPClientThreads
;
2236 /* max pipes for communicating between TCP acceptors and client threads */
2237 requiredFDsCount
+= (g_maxTCPClientThreads
* 2);
2238 /* max TCP queued connections */
2239 requiredFDsCount
+= g_maxTCPQueuedConnections
;
2240 /* DelayPipe pipe */
2241 requiredFDsCount
+= 2;
2244 /* webserver main socket */
2246 /* console main socket */
2253 getrlimit(RLIMIT_NOFILE
, &rl
);
2254 if (rl
.rlim_cur
<= requiredFDsCount
) {
2255 warnlog("Warning, this configuration can use more than %d file descriptors, web server and console connections not included, and the current limit is %d.", std::to_string(requiredFDsCount
), std::to_string(rl
.rlim_cur
));
2257 warnlog("You can increase this value by using LimitNOFILE= in the systemd unit file or ulimit.");
2259 warnlog("You can increase this value by using ulimit.");
/* Create, configure, bind and (for TCP) start listening on the socket of a
 * single frontend ('cs'). Exits the process on TLS setup failure; socket
 * option failures are merely logged.
 * NOTE(review): this extracted view of the function is missing several
 * structural lines (braces, #ifdef/#else/#endif pairs, try blocks); the
 * comments below mark the visible logic and the gaps. */
static void setUpLocalBind(std::unique_ptr<ClientState>& cs)
/* skip some warnings if there is an identical UDP context */
bool warn = cs->tcp == false || cs->tlsFrontend != nullptr || cs->dohFrontend != nullptr;
/* one ClientState holds either a UDP or a TCP listener fd */
int& fd = cs->tcp == false ? cs->udpFD : cs->tcpFD;
fd = SSocket(cs->local.sin4.sin_family, cs->tcp == false ? SOCK_DGRAM : SOCK_STREAM, 0);
SSetsockopt(fd, SOL_SOCKET, SO_REUSEADDR, 1);
#ifdef TCP_DEFER_ACCEPT
/* only wake the acceptor once data has arrived (the TCP-only guard around
   this section is elided from this view) */
SSetsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT, 1);
if (cs->fastOpenQueueSize > 0) {
/* presumably guarded by #ifdef TCP_FASTOPEN upstream — the guard and the
   matching "not supported" branch separator are elided here */
SSetsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, cs->fastOpenQueueSize);
warnlog("TCP Fast Open has been configured on local address '%s' but is not supported", cs->local.toStringWithPort());
if(cs->local.sin4.sin_family == AF_INET6) {
/* v6 listeners must not also grab the v4 side of the port */
SSetsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, 1);
/* allow binding to not-yet-configured addresses */
bindAny(cs->local.sin4.sin_family, fd);
if(!cs->tcp && IsAnyAddress(cs->local)) {
/* wildcard UDP listener: request the destination address of each datagram
   so replies use the right source ('one' declared in an elided line) */
setsockopt(fd, IPPROTO_IP, GEN_IP_PKTINFO, &one, sizeof(one)); // linux supports this, so why not - might fail on other systems
#ifdef IPV6_RECVPKTINFO
setsockopt(fd, IPPROTO_IPV6, IPV6_RECVPKTINFO, &one, sizeof(one));
if (cs->reuseport) {
/* SO_REUSEPORT lets several processes/threads share the port; the
   #ifdef SO_REUSEPORT / fallback structure is elided in this view */
SSetsockopt(fd, SOL_SOCKET, SO_REUSEPORT, 1);
/* no need to warn again if configured but support is not available, we already did for UDP */
warnlog("SO_REUSEPORT has been configured on local address '%s' but is not supported", cs->local.toStringWithPort());
if (cs->local.isIPv4()) {
/* avoid PMTU-based fragmentation games on the UDP socket; the enclosing
   try { ... } for this call is elided */
setSocketIgnorePMTU(cs->udpFD);
catch(const std::exception& e) {
warnlog("Failed to set IP_MTU_DISCOVER on UDP server socket for local address '%s': %s", cs->local.toStringWithPort(), e.what());
/* optional interface pinning (empty string means "any") */
const std::string& itf = cs->interface;
#ifdef SO_BINDTODEVICE
int res = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, itf.c_str(), itf.length());
/* the 'res != 0' check around this warning is elided */
warnlog("Error setting up the interface on local address '%s': %s", cs->local.toStringWithPort(), stringerror());
/* #else branch: platform without SO_BINDTODEVICE */
warnlog("An interface has been configured on local address '%s' but SO_BINDTODEVICE is not supported", cs->local.toStringWithPort());
/* eBPF socket filter, when one has been configured globally
   (#ifdef HAVE_EBPF opening is elided, only the closing survives below) */
if (g_defaultBPFFilter) {
cs->attachFilter(g_defaultBPFFilter);
vinfolog("Attaching default BPF Filter to %s frontend %s", (!cs->tcp ? "UDP" : "TCP"), cs->local.toStringWithPort());
#endif /* HAVE_EBPF */
if (cs->tlsFrontend != nullptr) {
/* DoT: load certificates/keys now; a failure here is fatal */
if (!cs->tlsFrontend->setupTLS()) {
errlog("Error while setting up TLS on local address '%s', exiting", cs->local.toStringWithPort());
_exit(EXIT_FAILURE);
if (cs->dohFrontend != nullptr) {
cs->dohFrontend->setup();
SBind(fd, cs->local);
/* TCP-only from here (the cs->tcp guard is elided): start listening and
   log what kind of frontend this is */
SListen(cs->tcpFD, SOMAXCONN);
if (cs->tlsFrontend != nullptr) {
warnlog("Listening on %s for TLS", cs->local.toStringWithPort());
else if (cs->dohFrontend != nullptr) {
warnlog("Listening on %s for DoH", cs->local.toStringWithPort());
else if (cs->dnscryptCtx != nullptr) {
warnlog("Listening on %s for DNSCrypt", cs->local.toStringWithPort());
/* plain TCP fallback */
warnlog("Listening on %s", cs->local.toStringWithPort());
2380 vector
<string
> locals
;
2381 vector
<string
> remotes
;
2382 bool checkConfig
{false};
2383 bool beClient
{false};
2384 bool beSupervised
{false};
/* Set to true by main() once the startup configuration has been fully
 * processed. NOTE(review): presumably read elsewhere to reject
 * configuration-time-only directives at runtime — confirm against the Lua
 * bindings before relying on that. Atomic because it is written by the
 * startup path and read from other threads. */
std::atomic<bool> g_configurationDone{false};
/* Command-line usage/help text printed on -h / bad options.
 * NOTE(review): the enclosing function signature (presumably a static
 * usage() helper) is elided from this extracted view; only the output
 * statements are visible. String literals preserved byte-for-byte. */
cout<<"Syntax: dnsdist [-C,--config file] [-c,--client [IP[:PORT]]]\n";
cout<<"[-e,--execute cmd] [-h,--help] [-l,--local addr]\n";
cout<<"[-v,--verbose] [--check-config] [--version]\n";
cout<<"-a,--acl netmask Add this netmask to the ACL\n";
cout<<"-C,--config file Load configuration from 'file'\n";
cout<<"-c,--client Operate as a client, connect to dnsdist. This reads\n";
cout<<" controlSocket from your configuration file, but also\n";
cout<<" accepts an IP:PORT argument\n";
#ifdef HAVE_LIBSODIUM
/* -k only exists when console encryption is compiled in */
cout<<"-k,--setkey KEY Use KEY for encrypted communication to dnsdist. This\n";
cout<<" is similar to setting setKey in the configuration file.\n";
cout<<" NOTE: this will leak this key in your shell's history\n";
cout<<" and in the systems running process list.\n";
cout<<"--check-config Validate the configuration file and exit. The exit-code\n";
cout<<" reflects the validation, 0 is OK, 1 means an error.\n";
cout<<" Any errors are printed as well.\n";
cout<<"-e,--execute cmd Connect to dnsdist and execute 'cmd'\n";
cout<<"-g,--gid gid Change the process group ID after binding sockets\n";
cout<<"-h,--help Display this helpful message\n";
cout<<"-l,--local address Listen on this local address\n";
cout<<"--supervised Don't open a console, I'm supervised\n";
cout<<" (use with e.g. systemd and daemontools)\n";
cout<<"--disable-syslog Don't log to syslog, only to stdout\n";
cout<<" (use with e.g. systemd)\n";
cout<<"-u,--uid uid Change the process user ID after binding sockets\n";
cout<<"-v,--verbose Enable verbose mode\n";
cout<<"-V,--version Show dnsdist version information and exit\n";
/* dnsdist entry point: seed randomness, parse the command line, load the
 * Lua configuration, create and bind the frontends, drop privileges,
 * start the worker and maintenance threads, then supervise the
 * health-check thread until exit.
 * NOTE(review): this extracted view of main() is missing many structural
 * lines (braces, switch 'case' labels, #else/#endif pairs, the enclosing
 * try block, several local declarations); comments below describe only
 * what is visible and mark the gaps. */
int main(int argc, char** argv)
{
size_t udpBindsCount = 0;
size_t tcpBindsCount = 0;
/* console-client line editing setup (libedit/readline) */
rl_attempted_completion_function = my_completion;
rl_completion_append_character = 0;
/* a write to a closed socket must not kill the process */
signal(SIGPIPE, SIG_IGN);
signal(SIGCHLD, SIG_IGN);
openlog("dnsdist", LOG_PID|LOG_NDELAY, LOG_DAEMON);
#ifdef HAVE_LIBSODIUM
if (sodium_init() == -1) {
cerr<<"Unable to initialize crypto library"<<endl;
/* (error-exit path elided from this view) */
/* seed the hash perturbation and the libc PRNG from libsodium's CSPRNG */
g_hashperturb=randombytes_uniform(0xffffffff);
srandom(randombytes_uniform(0xffffffff));
/* non-libsodium fallback: time/pid based seeding — weaker entropy
   (the #else and the declaration of 'tv' are elided from this view) */
gettimeofday(&tv, 0);
srandom(tv.tv_sec ^ tv.tv_usec ^ getpid());
g_hashperturb=random();
ComboAddress clientAddress = ComboAddress(); /* "unset" sentinel, compared below */
g_cmdLine.config=SYSCONFDIR "/dnsdist.conf"; /* default configuration path */
struct option longopts[]={
{"acl", required_argument, 0, 'a'},
{"check-config", no_argument, 0, 1},
{"client", no_argument, 0, 'c'},
{"config", required_argument, 0, 'C'},
{"disable-syslog", no_argument, 0, 2},
{"execute", required_argument, 0, 'e'},
{"gid", required_argument, 0, 'g'},
{"help", no_argument, 0, 'h'},
{"local", required_argument, 0, 'l'},
{"setkey", required_argument, 0, 'k'},
{"supervised", no_argument, 0, 3},
{"uid", required_argument, 0, 'u'},
{"verbose", no_argument, 0, 'v'},
{"version", no_argument, 0, 'V'},
/* (terminating all-zero entry and closing brace elided from this view) */
/* option parsing; the enclosing loop, the switch and its case labels are
   elided — only the per-option bodies survive below */
int c=getopt_long(argc, argv, "a:cC:e:g:hk:l:u:vV", longopts, &longindex);
g_cmdLine.checkConfig=true;  /* --check-config (long option value 1) */
g_cmdLine.beSupervised=true; /* --supervised (long option value 3) */
g_cmdLine.config=optarg;     /* -C/--config */
g_cmdLine.beClient=true;     /* -c/--client */
g_cmdLine.command=optarg;    /* -e/--execute */
g_cmdLine.gid=optarg;        /* -g/--gid */
/* presumably the -h case: banner before the usage text — confirm */
cout<<"dnsdist "<<VERSION<<endl;
/* -a/--acl: add a netmask to the query ACL ('optstring' declared in an
   elided line) */
g_ACL.modify([optstring](NetmaskGroup& nmg) { nmg.addMask(optstring); });
/* -k/--setkey: base64 console key */
#ifdef HAVE_LIBSODIUM
if (B64Decode(string(optarg), g_consoleKey) < 0) {
cerr<<"Unable to decode key '"<<optarg<<"'."<<endl;
/* #else branch of the libsodium guard: */
cerr<<"dnsdist has been built without libsodium, -k/--setkey is unsupported."<<endl;
/* -l/--local: remember listen addresses, sockets are created later */
g_cmdLine.locals.push_back(trim_copy(string(optarg)));
g_cmdLine.uid=optarg;        /* -u/--uid */
/* -V/--version: version plus compiled-in feature list */
#ifdef LUAJIT_VERSION
cout<<"dnsdist "<<VERSION<<" ("<<LUA_RELEASE<<" ["<<LUAJIT_VERSION<<"])"<<endl;
cout<<"dnsdist "<<VERSION<<" ("<<LUA_RELEASE<<")"<<endl;
cout<<"Enabled features: ";
#ifdef HAVE_DNS_OVER_TLS
cout<<"dns-over-tls(";
/* (per-TLS-provider details elided) */
#ifdef HAVE_DNS_OVER_HTTPS
cout<<"dns-over-https(DOH) ";
/* (feature-name output lines for the guards below are elided) */
#ifdef HAVE_DNSCRYPT
#ifdef HAVE_LIBCRYPTO
#ifdef HAVE_LIBSODIUM
#ifdef HAVE_PROTOBUF
#if defined(HAVE_RECVMMSG) && defined(HAVE_SENDMMSG) && defined(MSG_WAITFORONE)
cout<<"recvmmsg/sendmmsg ";
#ifdef HAVE_NET_SNMP
//getopt_long printed an error message.
/* remaining non-option arguments: backend server addresses, or — in
   client mode — the address of the dnsdist instance to connect to
   (the argv+optind adjustment is elided from this view) */
for(auto p = argv; *p; ++p) {
if(g_cmdLine.beClient) {
clientAddress = ComboAddress(*p, 5199); /* 5199: default console port */
g_cmdLine.remotes.push_back(*p);
/* default load-balancing policy */
ServerPolicy leastOutstandingPol{"leastOutstanding", leastOutstanding, false};
g_policy.setState(leastOutstandingPol);
if(g_cmdLine.beClient || !g_cmdLine.command.empty()) {
/* client mode: read controlSocket/key from the config, talk to the
   running instance, then exit */
setupLua(true, g_cmdLine.config);
if (clientAddress != ComboAddress())
g_serverControl = clientAddress;
doClient(g_serverControl, g_cmdLine.command);
_exit(EXIT_SUCCESS);
/* default query ACL: loopback, RFC1918/CGN ranges, link-local, ULA */
auto acl = g_ACL.getCopy();
for(auto& addr : {"127.0.0.0/8", "10.0.0.0/8", "100.64.0.0/10", "169.254.0.0/16", "192.168.0.0/16", "172.16.0.0/12", "::1/128", "fc00::/7", "fe80::/10"})
g_ACL.setState(acl);
/* default console ACL: loopback only */
auto consoleACL = g_consoleACL.getCopy();
for (const auto& mask : { "127.0.0.1/8", "::1/128" }) {
consoleACL.addMask(mask);
g_consoleACL.setState(consoleACL);
if (g_cmdLine.checkConfig) {
/* validation-only run: reaching this point means the config parsed */
setupLua(true, g_cmdLine.config);
// No exception was thrown
infolog("Configuration '%s' OK!", g_cmdLine.config);
_exit(EXIT_SUCCESS);
/* real startup: execute the config, collecting deferred actions */
auto todo=setupLua(false, g_cmdLine.config);
auto localPools = g_pools.getCopy();
bool precompute = false;
/* consistent hashing needs per-backend hashes; precompute when any
   policy (global or per-pool) is "chashed" */
if (g_policy.getLocal()->name == "chashed") {
for (const auto& entry : localPools) {
if (entry.second->policy != nullptr && entry.second->policy->name == "chashed") {
vinfolog("Pre-computing hashes for consistent hash load-balancing policy");
// pre compute hashes
auto backends = g_dstates.getLocal();
for (auto& backend : *backends) {
if (!g_cmdLine.locals.empty()) {
/* -l on the command line overrides plain listeners from the config */
for (auto it = g_frontends.begin(); it != g_frontends.end(); ) {
/* DoH, DoT and DNSCrypt frontends are separate */
if ((*it)->dohFrontend == nullptr && (*it)->tlsFrontend == nullptr && (*it)->dnscryptCtx == nullptr) {
it = g_frontends.erase(it);
for(const auto& loc : g_cmdLine.locals) {
/* one UDP and one TCP frontend per -l address, default port 53 */
g_frontends.push_back(std::unique_ptr<ClientState>(new ClientState(ComboAddress(loc, 53), false, false, 0, "", {})));
g_frontends.push_back(std::unique_ptr<ClientState>(new ClientState(ComboAddress(loc, 53), true, false, 0, "", {})));
if (g_frontends.empty()) {
/* nothing configured at all: fall back to loopback:53, UDP and TCP */
g_frontends.push_back(std::unique_ptr<ClientState>(new ClientState(ComboAddress("127.0.0.1", 53), false, false, 0, "", {})));
g_frontends.push_back(std::unique_ptr<ClientState>(new ClientState(ComboAddress("127.0.0.1", 53), true, false, 0, "", {})));
g_configurationDone = true;
for(auto& frontend : g_frontends) {
setUpLocalBind(frontend);
if (frontend->tcp == false) {
/* (udp/tcp bind counter increments elided from this view) */
warnlog("dnsdist %s comes with ABSOLUTELY NO WARRANTY. This is free software, and you are welcome to redistribute it according to the terms of the GPL version 2", VERSION);
/* log the effective ACLs; the declarations of 'vec' and 'acls' and the
   string-joining statements are elided from this view */
g_ACL.getLocal()->toStringVector(&vec);
for(const auto& s : vec) {
infolog("ACL allowing queries from: %s", acls.c_str());
g_consoleACL.getLocal()->toStringVector(&vec);
for (const auto& entry : vec) {
if (!acls.empty()) {
infolog("Console ACL allowing connections from: %s", acls.c_str());
#ifdef HAVE_LIBSODIUM
if (g_consoleEnabled && g_consoleKey.empty()) {
warnlog("Warning, the console has been enabled via 'controlSocket()' but no key has been set with 'setKey()' so all connections will fail until a key has been set");
/* privilege dropping — the declarations of 'newgid'/'newuid' are elided;
   group first, then user, so we can still change groups */
if(!g_cmdLine.gid.empty())
newgid = strToGID(g_cmdLine.gid.c_str());
if(!g_cmdLine.uid.empty())
newuid = strToUID(g_cmdLine.uid.c_str());
dropGroupPrivs(newgid);
dropUserPrivs(newuid);
/* we might still have capabilities remaining,
   for example if we have been started as root
   without --uid or --gid (please don't do that)
   or as an unprivileged user with ambient
   capabilities like CAP_NET_BIND_SERVICE.
   (the enclosing try { of this catch is elided) */
dropCapabilities(g_capabilitiesToRetain);
catch(const std::exception& e) {
warnlog("%s", e.what());
/* this need to be done _after_ dropping privileges */
g_delay = new DelayPipe<DelayedPacket>();
g_tcpclientthreads = std::unique_ptr<TCPClientCollection>(new TCPClientCollection(g_maxTCPClientThreads, g_useTCPSinglePipe));
localPools = g_pools.getCopy();
/* create the default pool no matter what */
createPoolIfNotExists(localPools, "");
if(g_cmdLine.remotes.size()) {
/* backends given on the command line go into the default pool */
for(const auto& address : g_cmdLine.remotes) {
auto ret=std::make_shared<DownstreamState>(ComboAddress(address, 53));
addServerToPool(localPools, "", ret);
if (ret->connected && !ret->threadStarted.test_and_set()) {
ret->tid = thread(responderThread, ret);
g_dstates.modify([ret](servers_t& servers) { servers.push_back(ret); });
g_pools.setState(localPools);
if(g_dstates.getLocal()->empty()) {
errlog("No downstream servers defined: all packets will get dropped");
// you might define them later, but you need to know
checkFileDescriptorsLimits(udpBindsCount, tcpBindsCount);
/* initial availability probe for backends in 'auto' mode */
for(auto& dss : g_dstates.getCopy()) { // it is a copy, but the internal shared_ptrs are the real deal
if(dss->availability==DownstreamState::Availability::Auto) {
bool newState=upCheck(dss);
warnlog("Marking downstream %s as '%s'", dss->getNameWithAddr(), newState ? "up" : "down");
dss->upStatus = newState;
/* one worker thread per frontend, pinned to CPUs when configured */
for(auto& cs : g_frontends) {
if (cs->dohFrontend != nullptr) {
#ifdef HAVE_DNS_OVER_HTTPS
std::thread t1(dohThread, cs.get());
if (!cs->cpus.empty()) {
mapThreadToCPUList(t1.native_handle(), cs->cpus);
#endif /* HAVE_DNS_OVER_HTTPS */
if (cs->udpFD >= 0) {
thread t1(udpClientThread, cs.get());
if (!cs->cpus.empty()) {
mapThreadToCPUList(t1.native_handle(), cs->cpus);
else if (cs->tcpFD >= 0) {
thread t1(tcpAcceptorThread, cs.get());
if (!cs->cpus.empty()) {
mapThreadToCPUList(t1.native_handle(), cs->cpus);
/* background maintenance threads (the enabling conditions around some of
   these, e.g. for carbon export, are elided) */
thread carbonthread(carbonDumpThread);
carbonthread.detach();
thread stattid(maintThread);
thread healththread(healthChecksThread);
if (!g_secPollSuffix.empty()) {
thread secpollthread(secPollThread);
secpollthread.detach();
if(g_cmdLine.beSupervised) {
/* supervised: stay in the foreground and block on the health checker
   (the #ifdef HAVE_SYSTEMD guard around sd_notify is elided) */
sd_notify(0, "READY=1");
healththread.join();
/* unsupervised branch: the console/daemon path keeps the process alive */
healththread.detach();
_exit(EXIT_SUCCESS);
/* fatal-error handlers for the enclosing try (whose opening is elided);
   all of them log and terminate without running destructors */
catch(const LuaContext::ExecutionErrorException& e) {
errlog("Fatal Lua error: %s", e.what());
/* surface the underlying cause when the Lua error wraps another one */
std::rethrow_if_nested(e);
} catch(const std::exception& ne) {
errlog("Details: %s", ne.what());
catch(PDNSException &ae)
errlog("Fatal pdns error: %s", ae.reason);
_exit(EXIT_FAILURE);
catch(std::exception &e)
errlog("Fatal error: %s", e.what());
_exit(EXIT_FAILURE);
catch(PDNSException &ae)
errlog("Fatal pdns error: %s", ae.reason);
_exit(EXIT_FAILURE);
/* Metric callback: total number of answers sent back to clients, i.e. the
 * denominator used for latency averaging (backend responses + self-answered
 * + cache hits). The string parameter (the metric name) is unused. */
uint64_t getLatencyCount(const std::string&)
{
return g_stats.responses + g_stats.selfAnswered + g_stats.cacheHits;
}