name: Install Coverity tools
command: curl -s https://scan.coverity.com/download/linux64 --data "token=${COVERITY_TOKEN}&project=${COVERITY_PROJECT}" | gunzip | tar xvf /dev/stdin --strip-components=2 --no-same-owner -C /usr/local
+ add-docs-upload-ssh:
+ description: Add ssh known_hosts fingerprints
+ steps:
+ - run:
+ command: mkdir -p $HOME/.ssh && echo "${DOCS_HOST} ${DOCS_FINGERPRINT}" > $HOME/.ssh/known_hosts
+ - add_ssh_keys:
+ fingerprints:
+ - "3e:0a:aa:2c:30:69:89:f3:eb:17:c1:3f:3b:78:40:7a"
+
auth-regress-setup:
description: Prepare the environment for auth regression tests
steps:
- ccache-cache-{{ arch }}-<< parameters.product >>-{{ .Branch }}
- ccache-cache-{{ arch }}-<< parameters.product >>-
+ install-doc-deps:
+ description: Install dependencies needed to build the documentation
+ steps:
+ - run:
+ name: Install dependencies
+ command: |
+ apt-get update && apt-get -qq -y install \
+ autoconf \
+ automake \
+ bison \
+ curl \
+ flex \
+ g++ \
+ git \
+ latexmk \
+ libboost-all-dev \
+ libedit-dev \
+ libluajit-5.1-dev \
+ libssl-dev \
+ make \
+ pkg-config \
+ ragel \
+ virtualenv
+ if [ "${CIRCLE_PROJECT_USERNAME}" = "PowerDNS" -a "${CIRCLE_PROJECT_REPONAME}" = "pdns" -a "${CIRCLE_BRANCH}" = "master" ]; then
+ apt-get update && apt-get -qq -y install \
+ texlive-full
+ fi
+
+ build-auth-docs:
+ description: Build documentation
+ steps:
+ - run:
+ name: autoconf
+ command: |
+ BUILDER_VERSION=0.0.0-git1 autoreconf -vfi
+ - run:
+ name: configure
+ command: |
+ ./configure \
+ --disable-lua-records \
+ --disable-unit-tests \
+ --without-dynmodules \
+ --without-modules
+ - run:
+ name: build docs
+ command: |
+ make -C docs html-docs
+ if [ "${CIRCLE_PROJECT_USERNAME}" = "PowerDNS" -a "${CIRCLE_PROJECT_REPONAME}" = "pdns" -a "${CIRCLE_BRANCH}" = "master" ]; then
+ make -C docs all-docs
+ fi
+
+ upload-auth-docs:
+ steps:
+ - run:
+ name: Upload documents
+ command: |
+ if [ "${CIRCLE_PROJECT_USERNAME}" = "PowerDNS" -a "${CIRCLE_PROJECT_REPONAME}" = "pdns" -a "${CIRCLE_BRANCH}" = "master" ]; then
+ rsync -crv --delete --no-p --chmod=g=rwX --exclude '*~' ./docs/html-docs/ docs_powerdns_com@${DOCS_HOST}:/authoritative/
+ rsync -crv --no-p --chmod=g=rwX --exclude '*~' ./docs/html-docs.tar.bz2 docs_powerdns_com@${DOCS_HOST}:/authoritative/
+ rsync -crv --no-p --chmod=g=rwX --exclude '*~' ./docs/PowerDNS-Authoritative.pdf docs_powerdns_com@${DOCS_HOST}:/authoritative/
+ fi
+
+ build-recursor-docs:
+ description: Build Recursor documentation
+ steps:
+ - run:
+ name: autoconf
+ command: |
+ BUILDER_VERSION=0.0.0-git1 autoreconf -vfi
+ working_directory: ~/project/pdns/recursordist
+ - run:
+ name: configure
+ command: |
+ ./configure \
+ --disable-unit-tests \
+ --disable-protobuf
+ working_directory: ~/project/pdns/recursordist
+ - run:
+ name: build docs
+ command: |
+ make html-docs
+ if [ "${CIRCLE_PROJECT_USERNAME}" = "PowerDNS" -a "${CIRCLE_PROJECT_REPONAME}" = "pdns" -a "${CIRCLE_BRANCH}" = "master" ]; then
+ make all-docs
+ fi
+ working_directory: ~/project/pdns/recursordist
+
+ upload-recursor-docs:
+ steps:
+ - run:
+ name: Upload documents
+ working_directory: ~/project/pdns/recursordist
+ command: |
+ if [ "${CIRCLE_PROJECT_USERNAME}" = "PowerDNS" -a "${CIRCLE_PROJECT_REPONAME}" = "pdns" -a "${CIRCLE_BRANCH}" = "master" ]; then
+ rsync -crv --delete --no-p --chmod=g=rwX --exclude '*~' html-docs/ docs_powerdns_com@${DOCS_HOST}:/recursor/
+ rsync -crv --no-p --chmod=g=rwX --exclude '*~' html-docs.tar.bz2 docs_powerdns_com@${DOCS_HOST}:/recursor/
+ rsync -crv --no-p --chmod=g=rwX --exclude '*~' PowerDNS-Recursor.pdf docs_powerdns_com@${DOCS_HOST}:/recursor/
+ fi
+
+ build-dnsdist-docs:
+ description: Build dnsdist documentation
+ steps:
+ - run:
+ name: autoconf
+ command: |
+ BUILDER_VERSION=0.0.0-git1 autoreconf -vfi
+ working_directory: ~/project/pdns/dnsdistdist
+ - run:
+ name: configure
+ command: |
+ ./configure \
+ --disable-unit-tests \
+ --disable-protobuf
+ working_directory: ~/project/pdns/dnsdistdist
+ - run:
+ name: build docs
+ command: |
+ make html-docs
+ if [ "${CIRCLE_PROJECT_USERNAME}" = "PowerDNS" -a "${CIRCLE_PROJECT_REPONAME}" = "pdns" -a "${CIRCLE_BRANCH}" = "master" ]; then
+ make all-docs
+ fi
+ working_directory: ~/project/pdns/dnsdistdist
+
+ upload-dnsdist-docs:
+ steps:
+ - run:
+ name: Upload documents
+ working_directory: ~/project/pdns/dnsdistdist
+ command: |
+ if [ "${CIRCLE_PROJECT_USERNAME}" = "PowerDNS" -a "${CIRCLE_PROJECT_REPONAME}" = "pdns" -a "${CIRCLE_BRANCH}" = "master" ]; then
+ rsync -crv --delete --no-p --chmod=g=rwX --exclude '*~' html-docs/ dnsdist_org@${DOCS_HOST}:
+ rsync -crv --no-p --chmod=g=rwX --exclude '*~' html-docs.tar.bz2 dnsdist_org@${DOCS_HOST}:
+ rsync -crv --no-p --chmod=g=rwX --exclude '*~' dnsdist.pdf dnsdist_org@${DOCS_HOST}:
+ fi
+
jobs:
build-auth:
docker:
test-auth-regress-odbc-mssql:
docker:
- image: debian:stretch
- - image: mcr.microsoft.com/mssql/server:2019-CTP2.2-ubuntu
+ - image: mcr.microsoft.com/mssql/server:2017-GA-ubuntu
environment:
- ACCEPT_EULA: Y
- SA_PASSWORD: 'SAsa12%%'
PDNSRECURSOR="/opt/pdns-recursor/sbin/pdns_recursor" \
./runtests recursor
+ build-auth-docs:
+ docker:
+ - image: debian:stretch
+ steps:
+ - checkout-shallow
+ - install-doc-deps
+ - build-auth-docs
+
+ deploy-auth-docs:
+ docker:
+ - image: debian:stretch
+ steps:
+ - checkout-shallow
+ - install-doc-deps
+ - build-auth-docs
+ - add-docs-upload-ssh
+ - upload-auth-docs
+
+ build-recursor-docs:
+ docker:
+ - image: debian:stretch
+ steps:
+ - checkout-shallow
+ - install-doc-deps
+ - build-recursor-docs
+
+ deploy-recursor-docs:
+ docker:
+ - image: debian:stretch
+ steps:
+ - checkout-shallow
+ - install-doc-deps
+ - build-recursor-docs
+ - add-docs-upload-ssh
+ - upload-recursor-docs
+
+ build-dnsdist-docs:
+ docker:
+ - image: debian:stretch
+ steps:
+ - checkout-shallow
+ - install-doc-deps
+ - build-dnsdist-docs
+
+ deploy-dnsdist-docs:
+ docker:
+ - image: debian:stretch
+ steps:
+ - checkout-shallow
+ - install-doc-deps
+ - build-dnsdist-docs
+ - add-docs-upload-ssh
+ - upload-dnsdist-docs
+
coverity-auth:
docker:
- image: debian:stretch
- test-recursor-api:
requires:
- build-recursor
+
+ build-docs:
+ jobs:
+ - build-auth-docs:
+ filters:
+ branches:
+ ignore: master
+ - build-recursor-docs:
+ filters:
+ branches:
+ ignore: master
+ - build-dnsdist-docs:
+ filters:
+ branches:
+ ignore: master
+
+ # These actually deploy
+ - deploy-auth-docs:
+ context: docs
+ filters:
+ branches:
+ only: master
+ - deploy-recursor-docs:
+ context: docs
+ filters:
+ branches:
+ only: master
+ - deploy-dnsdist-docs:
+ context: docs
+ filters:
+ branches:
+ only: master
-Subproject commit 4f5ab935098ebacd6b55e89b405bd69e80ab2baf
+Subproject commit 6176c5f68354ca82814ef20a4c87327785157ff3
git status | grep -q clean || DIRTY='.dirty'
# Special environment variable to signal that we are building a release, as this
-# has condequenses for the version number.
+# has consequences for the version number.
if [ "${IS_RELEASE}" = "YES" ]; then
TAG="$(git describe --tags --exact-match 2> /dev/null | cut -d- -f 2-)"
if [ -n "${TAG}" ]; then
# Generate the version number based on the branch
#
if [ ! -z "$(git rev-parse --abbrev-ref HEAD 2> /dev/null)" ]; then
- GIT_VERSION=""
- if $(git rev-parse --abbrev-ref HEAD | grep -q 'rel/'); then
- REL_TYPE="$(git rev-parse --abbrev-ref HEAD | cut -d/ -f 2 | cut -d- -f 1)"
- GIT_VERSION="$(git describe --match=${REL_TYPE}-* --tags | cut -d- -f2-)"
+ if [ -n "${BUILDER_MODULES}" ]; then
+ match=${BUILDER_MODULES}
+ [ $match = "authoritative" ] && match='auth'
+ [ $match = "recursor" ] && match='rec'
+ GIT_VERSION="$(git describe --match=${match}-* --tags | cut -d- -f2-)"
+ if [ $(echo ${GIT_VERSION} | awk -F"-" '{print NF-1}') = '3' ]; then
+ # A prerelease happened before
+ LAST_TAG="$(echo ${GIT_VERSION} | cut -d- -f1-2)"
+ COMMITS_SINCE_TAG="$(echo ${GIT_VERSION} | cut -d- -f3)"
+ GIT_HASH="$(echo ${GIT_VERSION} | cut -d- -f4)"
+ else
+ LAST_TAG="$(echo ${GIT_VERSION} | cut -d- -f1)"
+ COMMITS_SINCE_TAG="$(echo ${GIT_VERSION} | cut -d- -f2)"
+ GIT_HASH="$(echo ${GIT_VERSION} | cut -d- -f3)"
+ fi
fi
- LAST_TAG="$(echo ${GIT_VERSION} | cut -d- -f1)"
- COMMITS_SINCE_TAG="$(echo ${GIT_VERSION} | cut -d- -f2)"
- GIT_HASH="$(echo ${GIT_VERSION} | cut -d- -f3)"
-
if [ -z "${GIT_VERSION}" ]; then
+ # BUILDER_SUPPORT has more than one product listed, fall back to the 0.0.0 logic
+
# We used 0.0.XXXXgHASH for master in the previous incarnation of our build pipeline.
# This now becomes 0.0.XXXX.0.gHASH, as 0.0.0.XXXX.gHASH (which is more correct)
# would break upgrades for those running master
LAST_TAG=0.0
COMMITS_SINCE_TAG="$(git rev-list --count 12c868770afc20b6cc0da439d881105151d557dd..HEAD 2> /dev/null).0"
[ "${COMMITS_SINCE_TAG}" = ".0" ] && COMMITS_SINCE_TAG=0.0
- GIT_HASH="$(git rev-parse HEAD | cut -c1-10 2> /dev/null)"
+ GIT_HASH="g$(git rev-parse HEAD | cut -c1-10 2> /dev/null)"
fi
BRANCH=".$(git rev-parse --abbrev-ref HEAD | perl -p -e 's/[^[:alnum:]]//g;')"
- [ "${BRANCH}" = ".master" ] && BRANCH=''
TAG="$(git describe --tags --exact-match 2> /dev/null | cut -d- -f 2-)"
if [ -n "${TAG}" ]; then # We're exactly on a tag
fi
fi
- VERSION="${LAST_TAG}.${COMMITS_SINCE_TAG}${BRANCH}.g${GIT_HASH}${DIRTY}"
+ VERSION="${LAST_TAG}.${COMMITS_SINCE_TAG}${BRANCH}.${GIT_HASH}${DIRTY}"
fi
printf $VERSION
-@ 86400 IN SOA pdns-public-ns1.powerdns.com. pieter\.lexis.powerdns.com. 2019060601 10800 3600 604800 10800
+@ 86400 IN SOA pdns-public-ns1.powerdns.com. pieter\.lexis.powerdns.com. 2019061301 10800 3600 604800 10800
@ 3600 IN NS pdns-public-ns1.powerdns.com.
@ 3600 IN NS pdns-public-ns2.powerdns.com.
; Auth
recursor-4.1.11.security-status 60 IN TXT "1 OK"
recursor-4.1.12.security-status 60 IN TXT "1 OK"
recursor-4.1.13.security-status 60 IN TXT "1 OK"
+recursor-4.1.14.security-status 60 IN TXT "1 OK"
recursor-4.2.0-alpha1.security-status 60 IN TXT "1 OK"
recursor-4.2.0-beta1.security-status 60 IN TXT "1 OK"
recursor-4.2.0-rc1.security-status 60 IN TXT "1 OK"
d_asyncFlag = MDB_NOMETASYNC;
else if(syncMode == "mapasync")
d_asyncFlag = MDB_MAPASYNC;
- else if(syncMode.empty())
+ else if(syncMode.empty() || syncMode == "sync")
d_asyncFlag = 0;
else
throw std::runtime_error("Unknown sync mode "+syncMode+" requested for LMDB backend");
void declareArguments(const string &suffix="")
{
declare(suffix,"filename","Filename for lmdb","./pdns.lmdb");
- declare(suffix,"sync-mode","Synchronisation mode: nosync, nometasync, mapasync","mapasync");
+ declare(suffix,"sync-mode","Synchronisation mode: nosync, nometasync, mapasync, sync","mapasync");
// there just is no room for more on 32 bit
declare(suffix,"shards","Records database will be split into this number of shards", (sizeof(long) == 4) ? "2" : "64");
}
try {
reconnectIfNeeded();
+ if (!d_inTransaction) {
+ throw PDNSException("replaceRRSet called outside of transaction");
+ }
+
if (qt != QType::ANY) {
d_DeleteRRSetQuery_stmt->
bind("domain_id", domain_id)->
try {
reconnectIfNeeded();
+ if (inTransaction()) {
+ throw PDNSException("Attempted to start transaction while one was already active (domain '" + domain.toLogString() + "')");
+ }
d_db->startTransaction();
d_inTransaction = true;
if(domain_id >= 0) {
try {
reconnectIfNeeded();
+ if (!d_inTransaction) {
+ throw PDNSException("replaceComments called outside of transaction");
+ }
+
d_DeleteCommentRRsetQuery_stmt->
bind("domain_id",domain_id)->
bind("qname", qname)->
{ "addAction", true, "DNS rule, DNS action [, {uuid=\"UUID\"}]", "add a rule" },
{ "addBPFFilterDynBlocks", true, "addresses, dynbpf[[, seconds=10], msg]", "This is the eBPF equivalent of addDynBlocks(), blocking a set of addresses for (optionally) a number of seconds, using an eBPF dynamic filter" },
{ "addConsoleACL", true, "netmask", "add a netmask to the console ACL" },
- { "addDNSCryptBind", true, "\"127.0.0.1:8443\", \"provider name\", \"/path/to/resolver.cert\", \"/path/to/resolver.key\", {reusePort=false, tcpFastOpenSize=0, interface=\"\", cpus={}}", "listen to incoming DNSCrypt queries on 127.0.0.1 port 8443, with a provider name of `provider name`, using a resolver certificate and associated key stored respectively in the `resolver.cert` and `resolver.key` files. The fifth optional parameter is a table of parameters" },
+ { "addDNSCryptBind", true, "\"127.0.0.1:8443\", \"provider name\", \"/path/to/resolver.cert\", \"/path/to/resolver.key\", {reusePort=false, tcpFastOpenQueueSize=0, interface=\"\", cpus={}}", "listen to incoming DNSCrypt queries on 127.0.0.1 port 8443, with a provider name of `provider name`, using a resolver certificate and associated key stored respectively in the `resolver.cert` and `resolver.key` files. The fifth optional parameter is a table of parameters" },
{ "addDOHLocal", true, "addr, certFile, keyFile [, urls [, vars]]", "listen to incoming DNS over HTTPS queries on the specified address using the specified certificate and key. The last two parameters are tables" },
{ "addDynBlocks", true, "addresses, message[, seconds[, action]]", "block the set of addresses with message `msg`, for `seconds` seconds (10 by default), applying `action` (default to the one set with `setDynBlocksAction()`)" },
{ "addDynBlockSMT", true, "names, msessage[, seconds [, action]]", "block the set of names with message `msg`, for `seconds` seconds (10 by default), applying `action` (default to the one set with `setDynBlocksAction()`)" },
- { "addLocal", true, "addr [, {doTCP=true, reusePort=false, tcpFastOpenSize=0, interface=\"\", cpus={}}]", "add `addr` to the list of addresses we listen on" },
+ { "addLocal", true, "addr [, {doTCP=true, reusePort=false, tcpFastOpenQueueSize=0, interface=\"\", cpus={}}]", "add `addr` to the list of addresses we listen on" },
{ "addCacheHitResponseAction", true, "DNS rule, DNS response action [, {uuid=\"UUID\"}]", "add a cache hit response rule" },
{ "addResponseAction", true, "DNS rule, DNS response action [, {uuid=\"UUID\"}]", "add a response rule" },
{ "addSelfAnsweredResponseAction", true, "DNS rule, DNS response action [, {uuid=\"UUID\"}]", "add a self-answered response rule" },
{ "setECSSourcePrefixV4", true, "prefix-length", "the EDNS Client Subnet prefix-length used for IPv4 queries" },
{ "setECSSourcePrefixV6", true, "prefix-length", "the EDNS Client Subnet prefix-length used for IPv6 queries" },
{ "setKey", true, "key", "set access key to that key" },
- { "setLocal", true, "addr [, {doTCP=true, reusePort=false, tcpFastOpenSize=0, interface=\"\", cpus={}}]", "reset the list of addresses we listen on to this address" },
+ { "setLocal", true, "addr [, {doTCP=true, reusePort=false, tcpFastOpenQueueSize=0, interface=\"\", cpus={}}]", "reset the list of addresses we listen on to this address" },
{ "setMaxTCPClientThreads", true, "n", "set the maximum of TCP client threads, handling TCP connections" },
{ "setMaxTCPConnectionDuration", true, "n", "set the maximum duration of an incoming TCP connection, in seconds. 0 means unlimited" },
{ "setMaxTCPConnectionsPerClient", true, "n", "set the maximum number of TCP connections per client. 0 means unlimited" },
boost::optional<uint16_t> qtype,
boost::optional<bool> suffixMatch) {
if (cache) {
- cache->expungeByName(dname, qtype ? *qtype : QType(QType::ANY).getCode(), suffixMatch ? *suffixMatch : false);
+ g_outputBuffer="Expunged " + std::to_string(cache->expungeByName(dname, qtype ? *qtype : QType(QType::ANY).getCode(), suffixMatch ? *suffixMatch : false)) + " records\n";
}
});
g_lua.registerFunction<void(std::shared_ptr<DNSDistPacketCache>::*)()>("printStats", [](const std::shared_ptr<DNSDistPacketCache> cache) {
}
}
catch(std::exception& e) {
- g_outputBuffer="Error: "+string(e.what())+"\n";
+ g_outputBuffer="Error: "+string(e.what())+"\n";
+ errlog("Error while trying to listen on %s: %s\n", addr, string(e.what()));
}
});
});
g_lua.writeFunction("addDOHLocal", [client](const std::string& addr, boost::variant<std::string, std::vector<std::pair<int,std::string>>> certFiles, boost::variant<std::string, std::vector<std::pair<int,std::string>>> keyFiles, boost::optional<boost::variant<std::string, vector<pair<int, std::string> > > > urls, boost::optional<localbind_t> vars) {
+#ifdef HAVE_DNS_OVER_HTTPS
if (client) {
return;
}
-#ifdef HAVE_DNS_OVER_HTTPS
setLuaSideEffect();
if (g_configurationDone) {
g_outputBuffer="addDOHLocal cannot be used at runtime!\n";
cs->dohFrontend = frontend;
g_frontends.push_back(std::move(cs));
#else
- g_outputBuffer="DNS over HTTPS support is not present!\n";
+ throw std::runtime_error("addDOHLocal() called but DNS over HTTPS support is not present!");
#endif
});
}
catch(const std::exception& e) {
g_outputBuffer="Error while trying to get DOH frontend with index " + std::to_string(index) + ": "+string(e.what())+"\n";
- errlog("Error while trying to get get DOH frontend with index %zu: %s\n", index, string(e.what()));
+ errlog("Error while trying to get DOH frontend with index %zu: %s\n", index, string(e.what()));
}
#else
g_outputBuffer="DNS over HTTPS support is not present!\n";
});
g_lua.writeFunction("addTLSLocal", [client](const std::string& addr, boost::variant<std::string, std::vector<std::pair<int,std::string>>> certFiles, boost::variant<std::string, std::vector<std::pair<int,std::string>>> keyFiles, boost::optional<localbind_t> vars) {
+#ifdef HAVE_DNS_OVER_TLS
if (client)
return;
-#ifdef HAVE_DNS_OVER_TLS
setLuaSideEffect();
if (g_configurationDone) {
g_outputBuffer="addTLSLocal cannot be used at runtime!\n";
g_outputBuffer="Error: "+string(e.what())+"\n";
}
#else
- g_outputBuffer="DNS over TLS support is not present!\n";
+ throw std::runtime_error("addTLSLocal() called but DNS over TLS support is not present!");
#endif
});
/depcomp
/dnsdist.1
/dnslabeltext.cc
+/ext/ipcrypt/Makefile
+/ext/ipcrypt/Makefile.in
/ext/yahttp/Makefile
/ext/yahttp/Makefile.in
/ext/yahttp/yahttp/Makefile
:version: 1.4.0-beta1
:released: 6th of June 2019
- .. change::
+ .. change::
:tags: Bug Fixes, DoH
:pullreq: 7814
:tickets: 7810
* ``doTCP=true``: bool - Also bind on TCP on ``address``.
* ``reusePort=false``: bool - Set the ``SO_REUSEPORT`` socket option.
- * ``tcpFastOpenSize=0``: int - Set the TCP Fast Open queue size, enabling TCP Fast Open when available and the value is larger than 0.
+ * ``tcpFastOpenQueueSize=0``: int - Set the TCP Fast Open queue size, enabling TCP Fast Open when available and the value is larger than 0.
* ``interface=""``: str - Set the network interface to use.
* ``cpus={}``: table - Set the CPU affinity for this listener thread, asking the scheduler to run it on a single CPU id, or a set of CPU ids. This parameter is only available if the OS provides the pthread_setaffinity_np() function.
The default port is 443.
:param str certFile(s): The path to a X.509 certificate file in PEM format, or a list of paths to such files.
:param str keyFile(s): The path to the private key file corresponding to the certificate, or a list of paths to such files, whose order should match the certFile(s) ones.
- :param str or list urls: A base URL, or a list of base URLs, to accept queries on. Any query with a path under one of these will be treated as a DoH query. The default is /.
+ :param str-or-list urls: A base URL, or a list of base URLs, to accept queries on. Any query with a path under one of these will be treated as a DoH query. The default is /.
:param table options: A table with key: value pairs with listen options.
Options:
* ``reusePort=false``: bool - Set the ``SO_REUSEPORT`` socket option.
- * ``tcpFastOpenSize=0``: int - Set the TCP Fast Open queue size, enabling TCP Fast Open when available and the value is larger than 0.
+ * ``tcpFastOpenQueueSize=0``: int - Set the TCP Fast Open queue size, enabling TCP Fast Open when available and the value is larger than 0.
* ``interface=""``: str - Set the network interface to use.
* ``cpus={}``: table - Set the CPU affinity for this listener thread, asking the scheduler to run it on a single CPU id, or a set of CPU ids. This parameter is only available if the OS provides the pthread_setaffinity_np() function.
* ``idleTimeout=30``: int - Set the idle timeout, in seconds.
Options:
* ``reusePort=false``: bool - Set the ``SO_REUSEPORT`` socket option.
- * ``tcpFastOpenSize=0``: int - Set the TCP Fast Open queue size, enabling TCP Fast Open when available and the value is larger than 0.
+ * ``tcpFastOpenQueueSize=0``: int - Set the TCP Fast Open queue size, enabling TCP Fast Open when available and the value is larger than 0.
* ``interface=""``: str - Set the network interface to use.
* ``cpus={}``: table - Set the CPU affinity for this listener thread, asking the scheduler to run it on a single CPU id, or a set of CPU ids. This parameter is only available if the OS provides the pthread_setaffinity_np() function.
* ``provider``: str - The TLS library to use between GnuTLS and OpenSSL, if they were available and enabled at compilation time.
If this function exists, it is called every second to so regular tasks.
This can be used for e.g. :doc:`Dynamic Blocks <../guides/dynblocks>`.
-.. function: setAllowEmptyResponse()
+.. function:: setAllowEmptyResponse()
.. versionadded:: 1.4.0
Set to true (defaults to false) to allow empty responses (qdcount=0) with a NoError or NXDomain rcode (default) from backends. dnsdist drops these responses by default because it can't match them against the initial query since they don't contain the qname, qtype and qclass, and therefore the risk of collision is much higher than with regular responses.
+.. function:: makeIPCipherKey(password) -> string
+
+ .. versionadded:: 1.4.0
+
+ Hashes the password to generate a 16-byte key that can be used to pseudonymize IP addresses with IP cipher.
+
DOHFrontend
~~~~~~~~~~~
* ``doTCP=true``: bool - Also bind on TCP on ``address``.
* ``reusePort=false``: bool - Set the ``SO_REUSEPORT`` socket option.
- * ``tcpFastOpenSize=0``: int - Set the TCP Fast Open queue size, enabling TCP Fast Open when available and the value is larger than 0
+ * ``tcpFastOpenQueueSize=0``: int - Set the TCP Fast Open queue size, enabling TCP Fast Open when available and the value is larger than 0
* ``interface=""``: str - Sets the network interface to use
* ``cpus={}``: table - Set the CPU affinity for this listener thread, asking the scheduler to run it on a single CPU id, or a set of CPU ids. This parameter is only available if the OS provides the pthread_setaffinity_np() function.
.. attribute:: DNSQuestion.qclass
QClass (as an unsigned integer) of this question.
- Can be compared against :ref:`DNSQClass`.
+ Can be compared against :ref:`DNSClass`.
.. attribute:: DNSQuestion.qname
All parameters to ``func`` are integers:
- ``section`` is the section in the packet and can be compared to :ref:`DNSSection`
- - ``qclass`` is the QClass of the record. Can be compared to :ref:`DNSQClass`
+ - ``qclass`` is the QClass of the record. Can be compared to :ref:`DNSClass`
- ``qtype`` is the QType of the record. Can be e.g. compared to ``DNSQType.A``, ``DNSQType.AAAA`` :ref:`constants <DNSQType>` and the like.
- ``ttl`` is the current TTL
Options:
* ``serverID=""``: str - Set the Server Identity field.
- * ``ipEncryptKey=""``: str - A key, that can be generated via the :ref:`makeIPCipherKey` function, to encrypt the IP address of the requestor for anonymization purposes. The encryption is done using ipcrypt for IPv4 and a 128-bit AES ECB operation for IPv6.
+ * ``ipEncryptKey=""``: str - A key, that can be generated via the :func:`makeIPCipherKey` function, to encrypt the IP address of the requestor for anonymization purposes. The encryption is done using ipcrypt for IPv4 and a 128-bit AES ECB operation for IPv6.
.. function:: RemoteLogResponseAction(remoteLogger[, alterFunction[, includeCNAME [, options]]])
Options:
* ``serverID=""``: str - Set the Server Identity field.
- * ``ipEncryptKey=""``: str - A key, that can be generated via the :ref:`makeIPCipherKey` function, to encrypt the IP address of the requestor for anonymization purposes. The encryption is done using ipcrypt for IPv4 and a 128-bit AES ECB operation for IPv6.
+ * ``ipEncryptKey=""``: str - A key, that can be generated via the :func:`makeIPCipherKey` function, to encrypt the IP address of the requestor for anonymization purposes. The encryption is done using ipcrypt for IPv4 and a 128-bit AES ECB operation for IPv6.
.. function:: SetECSAction(v4 [, v6])
return "PONG";
}
-string DLShowHandler(const vector<string>&parts, Utility::pid_t ppid)
-try
-{
- extern StatBag S;
- string ret("Wrong number of parameters");
- if(parts.size()==2) {
- if(parts[1]=="*")
- ret=S.directory();
- else
- ret=S.getValueStr(parts[1]);
- }
+string DLShowHandler(const vector<string>&parts, Utility::pid_t ppid) {
+ try {
+ extern StatBag S;
+ string ret("Wrong number of parameters");
+ if (parts.size() == 2) {
+ if (parts[1] == "*")
+ ret = S.directory();
+ else
+ ret = S.getValueStr(parts[1]);
+ }
- return ret;
-}
-catch(...)
-{
- return "Unknown";
+ return ret;
+ }
+ catch (...) {
+ return "Unknown";
+ }
}
void setStatus(const string &str)
bestwho=dnsp.getRealRemote().getNetwork();
}
else {
+ lua.writeVariable("ecswho", nullptr);
bestwho=dnsp.getRemote();
}
UeberBackend::go();
}
-bool rectifyZone(DNSSECKeeper& dk, const DNSName& zone, bool quiet = false)
+bool rectifyZone(DNSSECKeeper& dk, const DNSName& zone, bool quiet = false, bool rectifyTransaction = true)
{
string output;
string error;
- bool ret = dk.rectifyZone(zone, error, output, true);
+ bool ret = dk.rectifyZone(zone, error, output, rectifyTransaction);
if (!quiet || !ret) {
// When quiet, only print output if there was an error
if (!output.empty()) {
return EXIT_SUCCESS;
}
-int editZone(DNSSECKeeper& dk, const DNSName &zone) {
+int editZone(const DNSName &zone) {
UeberBackend B;
DomainInfo di;
+ DNSSECKeeper dk(&B);
if (! B.getDomainInfo(zone, di)) {
cerr<<"Domain '"<<zone<<"' not found!"<<endl;
else if(changed.empty() || c!='a')
goto reAsk2;
+ di.backend->startTransaction(zone, -1);
for(const auto& change : changed) {
vector<DNSResourceRecord> vrr;
for(const DNSRecord& rr : grouped[change.first]) {
}
di.backend->replaceRRSet(di.id, change.first.first, QType(change.first.second), vrr);
}
- rectifyZone(dk, zone);
+ rectifyZone(dk, zone, false, false);
+ di.backend->commitTransaction();
return EXIT_SUCCESS;
}
rr.domain_id = di.id;
rr.qname = name;
DNSResourceRecord oldrr;
+
+ di.backend->startTransaction(zone, -1);
+
if(addOrReplace) { // the 'add' case
di.backend->lookup(rr.qtype, rr.qname, 0, di.id);
di.backend->replaceRRSet(di.id, name, rr.qtype, newrrs);
// need to be explicit to bypass the ueberbackend cache!
di.backend->lookup(rr.qtype, name, 0, di.id);
+ di.backend->commitTransaction();
cout<<"New rrset:"<<endl;
while(di.backend->get(rr)) {
cout<<rr.qname.toString()<<" "<<rr.ttl<<" IN "<<rr.qtype.getName()<<" "<<rr.content<<endl;
name=DNSName(name_)+zone;
QType qt(QType::chartocode(type_.c_str()));
+ di.backend->startTransaction(zone, -1);
di.backend->replaceRRSet(di.id, name, qt, vector<DNSResourceRecord>());
+ di.backend->commitTransaction();
return EXIT_SUCCESS;
}
if(cmds[1]==".")
cmds[1].clear();
- exit(editZone(dk, DNSName(cmds[1])));
+ exit(editZone(DNSName(cmds[1])));
}
else if(cmds[0] == "clear-zone") {
if(cmds.size() != 2) {
}
declareStats();
+ S.blacklist("special-memory-usage");
+
DLOG(g_log<<Logger::Warning<<"Verbose logging in effect"<<endl);
showProductVersion();
====================
.. changelog::
+ :version: 4.1.14
+ :released: 13th of June 2019
+
+ .. change::
+ :tags: Improvements
+ :pullreq: 7906
+
+ Add statistics counters for AD and CD queries.
+
+  .. change::
+    :tags: Bug Fixes
+    :pullreq: 7912
+
+ Add missing getregisteredname Lua function
+
:version: 4.1.13
:released: 21st of May 2019
g_luaconfs.setState(luaconfsCopy);
size_t queriesCount = 0;
+ const time_t fixedNow = sr->getNow().tv_sec;
- sr->setAsyncCallback([target,&queriesCount,keys](const ComboAddress& ip, const DNSName& domain, int type, bool doTCP, bool sendRDQuery, int EDNS0Level, struct timeval* now, boost::optional<Netmask>& srcmask, boost::optional<const ResolveContext&> context, LWResult* res, bool* chained) {
+ sr->setAsyncCallback([target,&queriesCount,keys,fixedNow](const ComboAddress& ip, const DNSName& domain, int type, bool doTCP, bool sendRDQuery, int EDNS0Level, struct timeval* now, boost::optional<Netmask>& srcmask, boost::optional<const ResolveContext&> context, LWResult* res, bool* chained) {
queriesCount++;
DNSName auth = domain;
addRecordToLW(res, domain, QType::SOA, "pdns-public-ns1.powerdns.com. pieter\\.lexis.powerdns.com. 2017032301 10800 3600 604800 3600", DNSResourceRecord::AUTHORITY, 3600);
addRRSIG(keys, res->d_records, domain, 300);
addNSECRecordToLW(domain, DNSName("z."), { QType::NSEC, QType::RRSIG }, 600, res->d_records);
- addRRSIG(keys, res->d_records, domain, 1);
+ addRRSIG(keys, res->d_records, domain, 1, false, boost::none, boost::none, fixedNow);
return 1;
}
BOOST_CHECK_EQUAL(sr->getValidationState(), Indeterminate);
BOOST_REQUIRE_EQUAL(ret.size(), 4);
BOOST_CHECK_EQUAL(queriesCount, 1);
- /* check that the entry has not been negatively cached */
+ /* check that the entry has been negatively cached */
const NegCache::NegCacheEntry* ne = nullptr;
BOOST_CHECK_EQUAL(SyncRes::t_sstorage.negcache.size(), 1);
BOOST_REQUIRE_EQUAL(SyncRes::t_sstorage.negcache.get(target, QType(QType::A), sr->getNow(), &ne), true);
return rc;
}
+static string SSQLite3ErrorString(sqlite3 *db)
+{
+ return string(sqlite3_errmsg(db)+string(" (")+std::to_string(sqlite3_extended_errcode(db))+string(")"));
+}
+
class SSQLite3Statement: public SSqlStatement
{
public:
// failed.
releaseStatement();
if (d_rc == SQLITE_CANTOPEN)
- throw SSqlException(string("CANTOPEN error in sqlite3, often caused by unwritable sqlite3 db *directory*: ")+string(sqlite3_errmsg(d_db->db())));
- throw SSqlException(string("Error while retrieving SQLite query results: ")+string(sqlite3_errmsg(d_db->db())));
+ throw SSqlException(string("CANTOPEN error in sqlite3, often caused by unwritable sqlite3 db *directory*: ")+SSQLite3ErrorString(d_db->db()));
+ throw SSqlException(string("Error while retrieving SQLite query results: ")+SSQLite3ErrorString(d_db->db()));
}
if(d_dolog)
g_log<<Logger::Warning<< "Query "<<((long)(void*)this)<<": "<<d_dtime.udiffNoReset()<<" usec to execute"<<endl;
#endif
{
releaseStatement();
- throw SSqlException(string("Unable to compile SQLite statement : '")+d_query+"': "+sqlite3_errmsg(d_db->db()));
+ throw SSqlException(string("Unable to compile SQLite statement : '")+d_query+"': "+SSQLite3ErrorString(d_db->db()));
}
if (pTail && strlen(pTail)>0)
g_log<<Logger::Warning<<"Sqlite3 command partially processed. Unprocessed part: "<<pTail<<endl;
ostringstream o;
for(const auto& i: d_stats) {
+ if (d_blacklist.find(i.first) != d_blacklist.end())
+ continue;
o<<i.first<<"="<<*(i.second)<<",";
}
for(const funcstats_t::value_type& val : d_funcstats) {
+ if (d_blacklist.find(val.first) != d_blacklist.end())
+ continue;
o << val.first<<"="<<val.second(val.first)<<",";
}
dir=o.str();
vector<string> ret;
for(const auto& i: d_stats) {
- ret.push_back(i.first);
+ if (d_blacklist.find(i.first) != d_blacklist.end())
+ continue;
+ ret.push_back(i.first);
}
for(const funcstats_t::value_type& val : d_funcstats) {
+ if (d_blacklist.find(val.first) != d_blacklist.end())
+ continue;
ret.push_back(val.first);
}
return d_rings.count(name) || d_comborings.count(name) || d_dnsnameqtyperings.count(name);
}
+void StatBag::blacklist(const string& str) {
+ d_blacklist.insert(str);
+}
+
template class StatRing<std::string, CIStringCompare>;
template class StatRing<SComboAddress>;
template class StatRing<std::tuple<DNSName, QType> >;
typedef map<string, func_t> funcstats_t;
funcstats_t d_funcstats;
bool d_doRings;
+ std::set<string> d_blacklist;
public:
StatBag(); //!< Naked constructor. You need to declare keys before this class becomes useful
AtomicCounter *getPointer(const string &key); //!< get a direct pointer to the value behind a key. Use this for high performance increments
string getValueStr(const string &key); //!< read a value behind a key, and return it as a string
string getValueStrZero(const string &key); //!< read a value behind a key, and return it as a string, and zero afterwards
+ void blacklist(const string &str);
};
inline void StatBag::deposit(const string &key, int value)
uint16_t type;
DNSResourceRecord::Place place;
bool operator<(const CacheKey& rhs) const {
- return tie(name, type, place) < tie(rhs.name, rhs.type, rhs.place);
+ return tie(type, place, name) < tie(rhs.type, rhs.place, rhs.name);
}
};
typedef map<CacheKey, CacheEntry> tcache_t;
// com. We then store that and keep querying the other backends in case one
// of them has a more specific zone but don't bother asking this specific
// backend again for b.c.example.com., c.example.com. and example.com.
- // If a backend has no match it may respond with an enmpty qname.
+ // If a backend has no match it may respond with an empty qname.
bool found = false;
int cstat;
DLOG(g_log<<Logger::Error<<"lookup: "<<shorter<<endl);
if((*i)->getAuth(shorter, sd)) {
DLOG(g_log<<Logger::Error<<"got: "<<sd->qname<<endl);
+ if(!shorter.isPartOf(sd->qname) && !sd->qname.empty()) {
+ throw PDNSException("getAuth() returned an SOA for the wrong zone. Zone '"+sd->qname.toLogString()+"' is not part of '"+shorter.toLogString()+"'");
+ }
j->first = sd->qname.wirelength();
j->second = *sd;
if(sd->qname == shorter) {
for(vector<DNSBackend *>::const_iterator i=backends.begin();i!=backends.end();++i)
if((*i)->getSOA(domain, sd)) {
+ if(domain != sd.qname) {
+ throw PDNSException("getSOA() returned an SOA for the wrong zone. Question: '"+domain.toLogString()+"', answer: '"+sd.qname.toLogString()+"'");
+ }
if(d_cache_ttl) {
DNSZoneRecord rr;
rr.dr.d_name = sd.qname;
+ "capable backends are loaded, or because the backends have DNSSEC disabled. Check your configuration.");
}
-static void updateDomainSettingsFromDocument(UeberBackend& B, const DomainInfo& di, const DNSName& zonename, const Json document) {
+static void updateDomainSettingsFromDocument(UeberBackend& B, const DomainInfo& di, const DNSName& zonename, const Json document, bool rectifyTransaction=true) {
vector<string> zonemaster;
bool shouldRectify = false;
for(auto value : document["masters"].array_items()) {
if (api_rectify == "1") {
string info;
string error_msg;
- if (!dk.rectifyZone(zonename, error_msg, info, true)) {
+ if (!dk.rectifyZone(zonename, error_msg, info, rectifyTransaction)) {
throw ApiException("Failed to rectify '" + zonename.toString() + "' " + error_msg);
}
}
if(!B.getDomainInfo(zonename, di))
throw ApiException("Creating domain '"+zonename.toString()+"' failed: lookup of domain ID failed");
+ di.backend->startTransaction(zonename, di.id);
+
// updateDomainSettingsFromDocument does NOT fill out the default we've established above.
if (!soa_edit_api_kind.empty()) {
di.backend->setDomainMetadataOne(zonename, "SOA-EDIT-API", soa_edit_api_kind);
}
- di.backend->startTransaction(zonename, di.id);
-
for(auto rr : new_records) {
rr.domain_id = di.id;
di.backend->feedRecord(rr, DNSName());
di.backend->feedComment(c);
}
- updateDomainSettingsFromDocument(B, di, zonename, document);
+ updateDomainSettingsFromDocument(B, di, zonename, document, false);
di.backend->commitTransaction();
if(req->method == "PUT") {
// update domain settings
- updateDomainSettingsFromDocument(B, di, zonename, req->json());
+ di.backend->startTransaction(zonename, -1);
+ updateDomainSettingsFromDocument(B, di, zonename, req->json(), false);
+ di.backend->commitTransaction();
resp->body = "";
resp->status = 204; // No Content, but indicate success