libldap-2.4-2 \
liblmdb0 \
libluajit-5.1-2 \
+ libp11-kit0 \
libpq5 \
libssl1.1 \
libsodium23 \
libsystemd0 \
default-libmysqlclient-dev \
- unixodbc
+ unixodbc \
+ softhsm2
install-dnsdist-deps:
description: "Install all libraries needed for testing dnsdist"
libldap2-dev \
liblmdb-dev \
libluajit-5.1-dev \
+ libp11-kit-dev \
libpq-dev \
libsodium-dev \
libsqlite3-dev \
--enable-unit-tests \
--enable-backend-unit-tests \
--enable-fuzz-targets \
+ --enable-experimental-pkcs11 \
--with-lmdb=/usr \
--with-libsodium \
--prefix=/opt/pdns-auth \
steps:
- auth-regress-setup
- run:
- command: apt-get install -qq -y sqlite3
+ command: apt-get install -qq -y sqlite3 p11-kit softhsm2
- auth-regress:
context: bind-both
- auth-regress:
context: bind-dnssec-nsec3-optout-both
- auth-regress:
context: bind-dnssec-nsec3-narrow
+ - auth-regress:
+ context: bind-dnssec-pkcs11
- run:
command: apt-get install -qq -y default-mysql-client
- run:
policyname
pollmplexer
Ponomarev
+poolers
poolname
portnum
portnumber
modules/gsqlite3backend/3.4.0_to_4.0.0_schema.sqlite3.sql
modules/gsqlite3backend/4.0.0_to_4.2.0_schema.sqlite3.sql
modules/gsqlite3backend/4.2.0_to_4.3.0_schema.sqlite3.sql
+modules/gsqlite3backend/4.3.0_to_4.3.1_schema.sqlite3.sql
modules/gsqlite3backend/dnssec-3.x_to_3.4.0_schema.sqlite3.sql
modules/gsqlite3backend/nodnssec-3.x_to_3.4.0_schema.sqlite3.sql
modules/gsqlite3backend/schema.sqlite3.sql
modules/gsqlite3backend/3.4.0_to_4.0.0_schema.sqlite3.sql
modules/gsqlite3backend/4.0.0_to_4.2.0_schema.sqlite3.sql
modules/gsqlite3backend/4.2.0_to_4.3.0_schema.sqlite3.sql
+modules/gsqlite3backend/4.3.0_to_4.3.1_schema.sqlite3.sql
modules/gsqlite3backend/dnssec-3.x_to_3.4.0_schema.sqlite3.sql
modules/gsqlite3backend/nodnssec-3.x_to_3.4.0_schema.sqlite3.sql
modules/gsqlite3backend/schema.sqlite3.sql
modules/gsqlite3backend/3.4.0_to_4.0.0_schema.sqlite3.sql
modules/gsqlite3backend/4.0.0_to_4.2.0_schema.sqlite3.sql
modules/gsqlite3backend/4.2.0_to_4.3.0_schema.sqlite3.sql
+modules/gsqlite3backend/4.3.0_to_4.3.1_schema.sqlite3.sql
modules/gsqlite3backend/dnssec-3.x_to_3.4.0_schema.sqlite3.sql
modules/gsqlite3backend/nodnssec-3.x_to_3.4.0_schema.sqlite3.sql
modules/gsqlite3backend/schema.sqlite3.sql
%doc modules/gsqlite3backend/3.4.0_to_4.0.0_schema.sqlite3.sql
%doc modules/gsqlite3backend/4.0.0_to_4.2.0_schema.sqlite3.sql
%doc modules/gsqlite3backend/4.2.0_to_4.3.0_schema.sqlite3.sql
+%doc modules/gsqlite3backend/4.3.0_to_4.3.1_schema.sqlite3.sql
%{_libdir}/%{name}/libgsqlite3backend.so
%if 0%{?rhel} >= 7
documentation <https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS>`__.
Default: "".
+.. _setting-gpgsql-prepared-statements:
+
+``gpgsql-prepared-statements``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Prepare statements for better performance, instead of sending parameterized queries.
+Might not work with connection poolers.
+Default: yes.
+
+.. versionadded:: 4.4.0
+
Default schema
--------------
.. note::
This feature is experimental, use at your own risk!
-.. deprecated:: 4.0.0
- slot IDs are deprecated, and you are expected to use slot label instead
+To enable it, compile PowerDNS Authoritative Server using ``--enable-experimental-pkcs11`` flag on configure.
+This requires you to have the p11-kit libraries and headers.
-To enable it, compile PowerDNS Authoritative Server using
-``--enable-experimental-pkcs11`` flag on configure. This requires you to
-have p11-kit libraries and headers.
+You can also log on to the tokens after starting the server, in this case you need to edit your PKCS#11 cryptokey record and remove PIN or set it empty.
+Do this after assigning/creating a key, as the PIN is required for assigning keys to a zone.
-You can also log on to the tokens after starting server, in this case
-you need to edit your PKCS#11 cryptokey record and remove PIN or set it
-empty. PIN is required for assigning keys to zone.
-
-Using with SoftHSM
-------------------
+Using PKCS#11 with SoftHSM
+--------------------------
.. warning::
- Due to an interaction between `SoftHSM and Botan <https://github.com/PowerDNS/pdns/issues/2496>`__,
- the PowerDNS Authoritative Server **will most likely** crash on exit when built with ``--enable-botan1.10 --enable-experimental-pkcs11``.
+ Due to an interaction between `SoftHSM and Botan <https://github.com/PowerDNS/pdns/issues/2496>`__, the PowerDNS Authoritative Server **will most likely** crash on exit when built with ``--enable-botan1.10 --enable-experimental-pkcs11``.
In 4.2.0, Botan support has been removed and this is no longer an issue.
-To test this feature, a software HSM can be used. It is **not
-recommended** to use this in production.
-
-Instructions on how to setup SoftHSM to work with the feature after
-compilation on Ubuntu/Debian (tested with Ubuntu 12.04 and 14.04).
-
-- ``apt-get install softhsm p11-kit opensc``
-- create directory ``/etc/pkcs11/modules``
-- create a file ``softhsm`` (``softhsm.module`` on newer versions),
- with contents:::
+To test this feature, a software HSM can be used.
+It is **not recommended** to do this in production.
- module: /home/cmouse/softhsm/lib/softhsm/libsofthsm.so managed: yes
+These instructions have been tested on Debian 10 (Buster).
-- Verify that it works: ``p11-kit -l``
-- Create at least two tokens (ksk and zsk) with (slot-number starts from 0)::
+- ``apt-get install softhsm p11-kit``
+- Verify that it works: ``p11-kit -l``, you should see ``softhsm2: .....``
+- Create a token::
- sudo softhsm --init-token --slot slot-number --label zone-ksk|zone-zsk --pin some-pin --so-pin another-pin
+ softhsm2-util --init-token --label my-pkcs11-dnskey --free --pin 1234 --so-pin 1234
-- Using pkcs11-tool, initialize your new keys.::
+- Assign the token to a zone (it says KSK, but because there is no ZSK, this will become a CSK)::
- sudo pkcs11-tool --module=/home/cmouse/softhsm/lib/softhsm/libsofthsm.so -l -p some-pin -k --key-type RSA:2048 -a zone-ksk|zone-zsk --slot-index slot-number
+ pdnsutil hsm assign example.com ecdsa256 ksk softhsm2 my-pkcs11-dnskey 1234 'my key' 'my pub key'
-- Assign the keys using (note that token label is not necessarily same
- as object label, see p11-kit -l)::
+- Create the key (replace 25 with the key ID shown by the previous command)::
- pdnsutil hsm assign zone rsasha256 ksk|zsk softhsm token-label pin zone-ksk|zsk
+ pdnsutil hsm create-key example.com 25
- Verify that everything worked, you should see valid data there::
- pdnsutil show-zone zone
-
-- SoftHSM signatures are fast enough to be used in live environment.
+ pdnsutil show-zone example.com
Using CryptAS
-------------
{
try {
setDB(new SPgSQL(getArg("dbname"),
- getArg("host"),
- getArg("port"),
- getArg("user"),
- getArg("password"),
- getArg("extra-connection-parameters")));
+ getArg("host"),
+ getArg("port"),
+ getArg("user"),
+ getArg("password"),
+ getArg("extra-connection-parameters"),
+ mustDo("prepared-statements")));
}
catch(SSqlException &e) {
declare(suffix,"port","Database backend port to connect to","");
declare(suffix,"password","Database backend password to connect with","");
declare(suffix,"extra-connection-parameters", "Extra parameters to add to connection string","");
+ declare(suffix,"prepared-statements", "Use prepared statements instead of parameterized queries", "yes");
declare(suffix,"dnssec","Enable DNSSEC processing","no");
class SPgSQLStatement: public SSqlStatement
{
public:
- SPgSQLStatement(const string& query, bool dolog, int nparams, SPgSQL* db) {
+ SPgSQLStatement(const string& query, bool dolog, int nparams, SPgSQL* db, unsigned int nstatement) {
d_query = query;
d_dolog = dolog;
d_parent = db;
- d_prepared = false;
d_nparams = nparams;
- d_res = NULL;
- d_res_set = NULL;
- paramValues = NULL;
- paramLengths = NULL;
- d_paridx = 0;
- d_residx = 0;
- d_resnum = 0;
- d_fnum = 0;
- d_cur_set = 0;
+ d_nstatement = nstatement;
}
SSqlStatement* bind(const string& name, bool value) { return bind(name, string(value ? "t" : "f")); }
SSqlStatement* execute() {
prepareStatement();
if (d_dolog) {
- g_log<<Logger::Warning<< "Query "<<((long)(void*)this)<<": " << d_query << endl;
+ g_log<<Logger::Warning<< "Query "<<((long)(void*)this)<<": Statement: " << d_query << endl;
+ if (d_paridx) {
+        // Log message is similar, but not exactly the same as the postgres server log.
+ std::stringstream log_message;
+ log_message<< "Query "<<((long)(void*)this)<<": Parameters: ";
+ for (int i = 0; i < d_paridx; i++) {
+ if (i != 0) {
+ log_message << ", ";
+ }
+ log_message << "$" << (i + 1) << " = '" << paramValues[i] << "'";
+ }
+ g_log<<Logger::Warning<< log_message.str() << endl;
+ }
d_dtime.set();
}
- d_res_set = PQexecParams(d_db(), d_query.c_str(), d_nparams, NULL, paramValues, paramLengths, NULL, 0);
+ if (!d_stmt.empty()) {
+ d_res_set = PQexecPrepared(d_db(), d_stmt.c_str(), d_nparams, paramValues, paramLengths, nullptr, 0);
+ } else {
+ d_res_set = PQexecParams(d_db(), d_query.c_str(), d_nparams, nullptr, paramValues, paramLengths, nullptr, 0);
+ }
ExecStatusType status = PQresultStatus(d_res_set);
if (status != PGRES_COMMAND_OK && status != PGRES_TUPLES_OK && status != PGRES_NONFATAL_ERROR) {
string errmsg(PQresultErrorMessage(d_res_set));
}
void nextResult() {
- if (d_res_set == NULL) return; // no refcursor
+ if (d_res_set == nullptr) return; // no refcursor
if (d_cur_set >= PQntuples(d_res_set)) {
PQclear(d_res_set);
- d_res_set = NULL;
+ d_res_set = nullptr;
return;
}
// this code handles refcursors if they are returned
#if PG_VERSION_NUM > 90000
// PQescapeIdentifier was added to libpq in postgresql 9.0
char *val = PQgetvalue(d_res_set, d_cur_set++, 0);
- char *portal = PQescapeIdentifier(d_db(), val, strlen(val));
+ char *portal = PQescapeIdentifier(d_db(), val, strlen(val));
string cmd = string("FETCH ALL FROM \"") + string(portal) + string("\"");
PQfreemem(portal);
#else
string cmd = string("FETCH ALL FROM \"") + portal + string("\"");
#endif
// execute FETCH
- if (d_dolog)
+ if (d_dolog) {
g_log<<Logger::Warning<<"Query: "<<cmd<<endl;
+ }
d_res = PQexec(d_db(),cmd.c_str());
d_resnum = PQntuples(d_res);
- d_fnum = PQnfields(d_res);
d_residx = 0;
} else {
d_res = d_res_set;
- d_res_set = NULL;
+ d_res_set = nullptr;
d_resnum = PQntuples(d_res);
- d_fnum = PQnfields(d_res);
}
}
d_residx++;
if (d_residx >= d_resnum) {
PQclear(d_res);
- d_res = NULL;
+ d_res = nullptr;
nextResult();
}
return this;
SSqlStatement* getResult(result_t& result) {
result.clear();
- if (d_res == NULL) return this;
+ if (d_res == nullptr) return this;
result.reserve(d_resnum);
row_t row;
while(hasNextRow()) { nextRow(row); result.push_back(std::move(row)); }
SSqlStatement* reset() {
int i;
- if (d_res)
+ if (d_res) {
PQclear(d_res);
- if (d_res_set)
+ }
+ if (d_res_set) {
PQclear(d_res_set);
- d_res_set = NULL;
- d_res = NULL;
+ }
+ d_res_set = nullptr;
+ d_res = nullptr;
d_paridx = d_residx = d_resnum = 0;
- if (paramValues)
- for(i=0;i<d_nparams;i++)
- if (paramValues[i]) delete [] paramValues[i];
+ if (paramValues) {
+ for(i=0;i<d_nparams;i++) {
+ if (paramValues[i]) {
+ delete [] paramValues[i];
+ }
+ }
+ }
delete [] paramValues;
- paramValues = NULL;
+ paramValues = nullptr;
delete [] paramLengths;
- paramLengths = NULL;
+ paramLengths = nullptr;
return this;
}
void releaseStatement() {
d_prepared = false;
reset();
+ if (!d_stmt.empty()) {
+ string cmd = string("DEALLOCATE " + d_stmt);
+ PGresult *res = PQexec(d_db(), cmd.c_str());
+ PQclear(res);
+ d_stmt.clear();
+ }
}
void prepareStatement() {
if (d_prepared) return;
- paramValues=NULL;
- d_cur_set=d_paridx=d_residx=d_resnum=d_fnum=0;
- paramLengths=NULL;
- d_res=NULL;
- d_res_set=NULL;
+ if (d_parent->usePrepared()) {
+ // prepare a statement; name must be unique per session (using d_nstatement to ensure this).
+ this->d_stmt = string("stmt") + std::to_string(d_nstatement);
+ PGresult* res = PQprepare(d_db(), d_stmt.c_str(), d_query.c_str(), d_nparams, nullptr);
+ ExecStatusType status = PQresultStatus(res);
+ string errmsg(PQresultErrorMessage(res));
+ PQclear(res);
+ if (status != PGRES_COMMAND_OK && status != PGRES_TUPLES_OK && status != PGRES_NONFATAL_ERROR) {
+ releaseStatement();
+        throw SSqlException("Fatal error during PQprepare: " + d_query + string(": ") + errmsg);
+ }
+ }
+ paramValues = nullptr;
+ paramLengths = nullptr;
+ d_cur_set = d_paridx = d_residx = d_resnum = 0;
+ d_res = nullptr;
+ d_res_set = nullptr;
d_prepared = true;
}
void allocate() {
- if (paramValues != NULL) return;
+ if (paramValues != nullptr) return;
paramValues = new char*[d_nparams];
paramLengths = new int[d_nparams];
memset(paramValues, 0, sizeof(char*)*d_nparams);
}
string d_query;
+ string d_stmt;
SPgSQL *d_parent;
- PGresult *d_res_set;
- PGresult *d_res;
+ PGresult *d_res_set{nullptr};
+ PGresult *d_res{nullptr};
bool d_dolog;
DTime d_dtime; // only used if d_dolog is set
- bool d_prepared;
+ bool d_prepared{false};
int d_nparams;
- int d_paridx;
- char **paramValues;
- int *paramLengths;
- int d_residx;
- int d_resnum;
- int d_fnum;
- int d_cur_set;
+ int d_paridx{0};
+ char **paramValues{nullptr};
+ int *paramLengths{nullptr};
+ int d_residx{0};
+ int d_resnum{0};
+ int d_cur_set{0};
+ unsigned int d_nstatement;
};
bool SPgSQL::s_dolog;
SPgSQL::SPgSQL(const string &database, const string &host, const string& port, const string &user,
- const string &password, const string &extra_connection_parameters)
+ const string &password, const string &extra_connection_parameters, const bool use_prepared)
{
- d_db=0;
+ d_db = nullptr;
d_in_trx = false;
- d_connectstr="";
+ d_connectstr = "";
+ d_nstatements = 0;
if (!database.empty())
d_connectstr+="dbname="+database;
d_connectstr+=" password="+password;
}
+ d_use_prepared = use_prepared;
+
d_db=PQconnectdb(d_connectstr.c_str());
if (!d_db || PQstatus(d_db)==CONNECTION_BAD) {
std::unique_ptr<SSqlStatement> SPgSQL::prepare(const string& query, int nparams)
{
- return std::unique_ptr<SSqlStatement>(new SPgSQLStatement(query, s_dolog, nparams, this));
+ d_nstatements++;
+ return std::unique_ptr<SSqlStatement>(new SPgSQLStatement(query, s_dolog, nparams, this, d_nstatements));
}
void SPgSQL::startTransaction() {
public:
SPgSQL(const string &database, const string &host="", const string& port="",
const string &user="", const string &password="",
- const string &extra_connection_parameters="");
+ const string &extra_connection_parameters="", const bool use_prepared = true);
~SPgSQL();
PGconn* db() { return d_db; }
bool in_trx() const { return d_in_trx; }
+ bool usePrepared() { return d_use_prepared; }
private:
PGconn* d_db;
string d_connectlogstr;
static bool s_dolog;
bool d_in_trx;
+ bool d_use_prepared;
+ unsigned int d_nstatements;
};
--- /dev/null
+CREATE INDEX records_lookup_idx ON records(name, type);
+CREATE INDEX records_lookup_id_idx ON records(domain_id, name, type);
+CREATE INDEX records_order_idx ON records(domain_id, ordername);
+
+DROP INDEX IF EXISTS rec_name_index;
+DROP INDEX IF EXISTS nametype_index;
+DROP INDEX IF EXISTS domain_id;
+DROP INDEX IF EXISTS orderindex;
+
+CREATE INDEX comments_idx ON comments(domain_id, name, type);
+
+DROP INDEX IF EXISTS comments_domain_id_index;
+DROP INDEX IF EXISTS comments_nametype_index;
+
+ANALYZE;
3.4.0_to_4.0.0_schema.sqlite3.sql \
4.0.0_to_4.2.0_schema.sqlite3.sql \
4.2.0_to_4.3.0_schema.sqlite3.sql \
+ 4.3.0_to_4.3.1_schema.sqlite3.sql \
schema.sqlite3.sql
libgsqlite3backend_la_SOURCES = gsqlite3backend.cc gsqlite3backend.hh
FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX rec_name_index ON records(name);
-CREATE INDEX nametype_index ON records(name,type);
-CREATE INDEX domain_id ON records(domain_id);
-CREATE INDEX orderindex ON records(ordername);
+CREATE INDEX records_lookup_idx ON records(name, type);
+CREATE INDEX records_lookup_id_idx ON records(domain_id, name, type);
+CREATE INDEX records_order_idx ON records(domain_id, ordername);
CREATE TABLE supermasters (
FOREIGN KEY(domain_id) REFERENCES domains(id) ON DELETE CASCADE ON UPDATE CASCADE
);
-CREATE INDEX comments_domain_id_index ON comments (domain_id);
-CREATE INDEX comments_nametype_index ON comments (name, type);
+CREATE INDEX comments_idx ON comments(domain_id, name, type);
CREATE INDEX comments_order_idx ON comments (domain_id, modified_at);
$(AM_V_GEN)sed -e 's!/pdns_server!& --config-name=%i!' \
-e 's!Authoritative Server!& %i!' \
-e 's!RuntimeDirectory=.*!&-%i!' \
+ -e 's!SyslogIdentifier=.*!&-%i!' \
< $< > $@
systemdsystemunitdir = $(SYSTEMD_DIR)
toTrim -= totErased;
- while (toTrim > 0) {
+ while (true) {
size_t pershard = toTrim / maps_size + 1;
for (auto& mc : maps) {
const typename C::lock l(mc);
totErased++;
toTrim--;
if (toTrim == 0) {
- break;
+ return totErased;
}
}
}
}
+ // Not reached
return totErased;
}
frontend->d_trustForwardedForHeader = boost::get<bool>((*vars)["trustForwardedForHeader"]);
}
+ if (vars->count("internalPipeBufferSize")) {
+ frontend->d_internalPipeBufferSize = boost::get<int>((*vars)["internalPipeBufferSize"]);
+ }
+
parseTLSConfig(frontend->d_tlsConfig, "addDOHLocal", vars);
}
g_dohlocals.push_back(frontend);
{ "dyn-blocked", MetricDefinition(PrometheusMetricType::counter, "Number of queries dropped because of a dynamic block")},
{ "dyn-block-nmg-size", MetricDefinition(PrometheusMetricType::gauge, "Number of dynamic blocks entries") },
{ "security-status", MetricDefinition(PrometheusMetricType::gauge, "Security status of this software. 0=unknown, 1=OK, 2=upgrade recommended, 3=upgrade mandatory") },
+ { "doh-query-pipe-full", MetricDefinition(PrometheusMetricType::counter, "Number of DoH queries dropped because the internal pipe used to distribute queries was full") },
+ { "doh-response-pipe-full", MetricDefinition(PrometheusMetricType::counter, "Number of DoH responses dropped because the internal pipe used to distribute responses was full") },
{ "udp-in-errors", MetricDefinition(PrometheusMetricType::counter, "From /proc/net/snmp InErrors") },
{ "udp-noport-errors", MetricDefinition(PrometheusMetricType::counter, "From /proc/net/snmp NoPorts") },
{ "udp-recvbuf-errors", MetricDefinition(PrometheusMetricType::counter, "From /proc/net/snmp RcvbufErrors") },
#ifdef HAVE_DNS_OVER_HTTPS
// DoH query
du->response = std::string(response, responseLen);
- if (send(du->rsock, &du, sizeof(du), 0) != sizeof(du)) {
+ static_assert(sizeof(du) <= PIPE_BUF, "Writes up to PIPE_BUF are guaranteed not to be interleaved and to either fully succeed or fail");
+ ssize_t sent = write(du->rsock, &du, sizeof(du));
+ if (sent != sizeof(du)) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ ++g_stats.dohResponsePipeFull;
+ vinfolog("Unable to pass a DoH response to the DoH worker thread because the pipe is full");
+ }
+ else {
+ vinfolog("Unable to pass a DoH response to the DoH worker thread because we couldn't write to the pipe: %s", stringerror());
+ }
+
/* at this point we have the only remaining pointer on this
DOHUnit object since we did set ids->du to nullptr earlier,
except if we got the response before the pointer could be
stat_t cacheMisses{0};
stat_t latency0_1{0}, latency1_10{0}, latency10_50{0}, latency50_100{0}, latency100_1000{0}, latencySlow{0}, latencySum{0};
stat_t securityStatus{0};
+ stat_t dohQueryPipeFull{0};
+ stat_t dohResponsePipeFull{0};
double latencyAvg100{0}, latencyAvg1000{0}, latencyAvg10000{0}, latencyAvg1000000{0};
typedef std::function<uint64_t(const std::string&)> statfunction_t;
{"dyn-blocked", &dynBlocked},
{"dyn-block-nmg-size", [](const std::string&) { return g_dynblockNMG.getLocal()->size(); }},
{"security-status", &securityStatus},
+ {"doh-query-pipe-full", &dohQueryPipeFull},
+ {"doh-response-pipe-full", &dohResponsePipeFull},
// Latency histogram
{"latency-sum", &latencySum},
{"latency-count", getLatencyCount},
.. versionadded:: 1.4.0
.. versionchanged:: 1.5.0
- ``sendCacheControlHeaders``, ``sessionTimeout``, ``trustForwardedForHeader`` options added.
+ ``internalPipeBufferSize``, ``sendCacheControlHeaders``, ``sessionTimeout``, ``trustForwardedForHeader`` options added.
``url`` now defaults to ``/dns-query`` instead of ``/``. Added ``tcpListenQueueSize`` parameter.
Listen on the specified address and TCP port for incoming DNS over HTTPS connections, presenting the specified X.509 certificate.
* ``sendCacheControlHeaders``: bool - Whether to parse the response to find the lowest TTL and set a HTTP Cache-Control header accordingly. Default is true.
* ``trustForwardedForHeader``: bool - Whether to parse any existing X-Forwarded-For header in the HTTP query and use the right-most value as the client source address and port, for ACL checks, rules, logging and so on. Default is false.
* ``tcpListenQueueSize=SOMAXCONN``: int - Set the size of the listen queue. Default is ``SOMAXCONN``.
+ * ``internalPipeBufferSize=0``: int - Set the size in bytes of the internal buffer of the pipes used internally to pass queries and responses between threads. Requires support for ``F_SETPIPE_SZ`` which is present in Linux since 2.6.35. The actual size might be rounded up to a multiple of a page size. 0 means that the OS default size is used.
.. function:: addTLSLocal(address, certFile(s), keyFile(s) [, options])
dnsdist worker thread which we also launched.
This dnsdist worker thread injects the query into the normal dnsdist flow
- (as a datagram over a socketpair). The response also goes back over a
- (different) socketpair, where we pick it up and deliver it back to h2o.
+ (over a pipe). The response also goes back over a (different) pipe,
+ where we pick it up and deliver it back to h2o.
For coordination, we use the h2o socket multiplexer, which is sensitive to our
- socketpair too.
+ pipe too.
*/
/* h2o notes.
// through the bowels of h2o
struct DOHServerConfig
{
- DOHServerConfig(uint32_t idleTimeout): accept_ctx(new DOHAcceptContext)
+ DOHServerConfig(uint32_t idleTimeout, uint32_t internalPipeBufferSize): accept_ctx(new DOHAcceptContext)
{
- if(socketpair(AF_LOCAL, SOCK_DGRAM, 0, dohquerypair) < 0) {
- unixDie("Creating a socket pair for DNS over HTTPS");
+ int fd[2];
+ if (pipe(fd) < 0) {
+ unixDie("Creating a pipe for DNS over HTTPS");
}
+ dohquerypair[0] = fd[1];
+ dohquerypair[1] = fd[0];
- if (socketpair(AF_LOCAL, SOCK_DGRAM, 0, dohresponsepair) < 0) {
+ if (pipe(fd) < 0) {
close(dohquerypair[0]);
close(dohquerypair[1]);
- unixDie("Creating a socket pair for DNS over HTTPS");
+ unixDie("Creating a pipe for DNS over HTTPS");
}
+ dohresponsepair[0] = fd[1];
+ dohresponsepair[1] = fd[0];
+
+ setNonBlocking(dohquerypair[0]);
+ if (internalPipeBufferSize > 0) {
+ setPipeBufferSize(dohquerypair[0], internalPipeBufferSize);
+ }
+
+ setNonBlocking(dohresponsepair[0]);
+ if (internalPipeBufferSize > 0) {
+ setPipeBufferSize(dohresponsepair[0], internalPipeBufferSize);
+ }
+
+ setNonBlocking(dohresponsepair[1]);
+
h2o_config_init(&h2o_config);
h2o_config.http2.idle_timeout = idleTimeout * 1000;
}
/* increase the ref counter before sending the pointer */
oldDU->get();
- if (send(oldDU->rsock, &oldDU, sizeof(oldDU), 0) != sizeof(oldDU)) {
+
+ static_assert(sizeof(oldDU) <= PIPE_BUF, "Writes up to PIPE_BUF are guaranteed not to be interleaved and to either fully succeed or fail");
+ ssize_t sent = write(oldDU->rsock, &oldDU, sizeof(oldDU));
+ if (sent != sizeof(oldDU)) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ ++g_stats.dohResponsePipeFull;
+ vinfolog("Unable to pass a DoH timeout to the DoH worker thread because the pipe is full");
+ }
+ else {
+ vinfolog("Unable to pass a DoH timeout to the DoH worker thread because we couldn't write to the pipe: %s", stringerror());
+ }
+
oldDU->release();
}
+
oldDU->release();
oldDU = nullptr;
}
}
/* increase the ref counter before sending the pointer */
du->get();
- if (send(du->rsock, &du, sizeof(du), 0) != sizeof(du)) {
+
+ static_assert(sizeof(du) <= PIPE_BUF, "Writes up to PIPE_BUF are guaranteed not to be interleaved and to either fully succeed or fail");
+ ssize_t sent = write(du->rsock, &du, sizeof(du));
+ if (sent != sizeof(du)) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ ++g_stats.dohResponsePipeFull;
+ vinfolog("Unable to pass a DoH self-answered response to the DoH worker thread because the pipe is full");
+ }
+ else {
        vinfolog("Unable to pass a DoH self-answered response to the DoH worker thread because we couldn't write to the pipe: %s", stringerror());
+ }
+
du->release();
}
return 0;
auto ptr = du.release();
*(ptr->self) = ptr;
try {
- if(send(dsc->dohquerypair[0], &ptr, sizeof(ptr), 0) != sizeof(ptr)) {
+ static_assert(sizeof(ptr) <= PIPE_BUF, "Writes up to PIPE_BUF are guaranteed not to be interleaved and to either fully succeed or fail");
+ ssize_t sent = write(dsc->dohquerypair[0], &ptr, sizeof(ptr));
+ if (sent != sizeof(ptr)) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ ++g_stats.dohQueryPipeFull;
+ vinfolog("Unable to pass a DoH query to the DoH worker thread because the pipe is full");
+ }
+ else {
+ vinfolog("Unable to pass a DoH query to the DoH worker thread because we couldn't write to the pipe: %s", stringerror());
+ }
ptr->release();
ptr = nullptr;
h2o_send_error_500(req, "Internal Server Error", "Internal Server Error", 0);
h2o_socket_t* sock = req->conn->callbacks->get_socket(req->conn);
ComboAddress remote;
ComboAddress local;
- h2o_socket_getpeername(sock, reinterpret_cast<struct sockaddr*>(&remote));
+
+ if (h2o_socket_getpeername(sock, reinterpret_cast<struct sockaddr*>(&remote)) == 0) {
+ /* getpeername failed, likely because the connection has already been closed,
+ but anyway that means we can't get the remote address, which could allow an ACL bypass */
+ h2o_send_error_500(req, getReasonFromStatusCode(500).c_str(), "Internal Server Error - Unable to get remote address", 0);
+ return 0;
+ }
+
h2o_socket_getsockname(sock, reinterpret_cast<struct sockaddr*>(&local));
DOHServerConfig* dsc = reinterpret_cast<DOHServerConfig*>(req->conn->ctx->storage.entries[0].data);
/* query has been parsed by h2o, which called doh_handler() in the main DoH thread.
In order not to blockfor long, doh_handler() called doh_dispatch_query() which allocated
a DOHUnit object and passed it to us */
-static void dnsdistclient(int qsock, int rsock)
+static void dnsdistclient(int qsock)
{
setThreadName("dnsdist/doh-cli");
for(;;) {
try {
DOHUnit* du = nullptr;
- ssize_t got = recv(qsock, &du, sizeof(du), 0);
+ ssize_t got = read(qsock, &du, sizeof(du));
if (got < 0) {
warnlog("Error receiving internal DoH query: %s", strerror(errno));
continue;
// so we can use UDP to talk to the backend.
auto dh = const_cast<struct dnsheader*>(reinterpret_cast<const struct dnsheader*>(du->query.c_str()));
- if(!dh->arcount) {
+ if (!dh->arcount) {
std::string res;
generateOptRR(std::string(), res, 4096, 0, false);
// we leave existing EDNS in place
}
- if(processDOHQuery(du) < 0) {
+ if (processDOHQuery(du) < 0) {
du->status_code = 500;
/* increase the ref count before sending the pointer */
du->get();
- if(send(du->rsock, &du, sizeof(du), 0) != sizeof(du)) {
- du->release(); // XXX but now what - will h2o time this out for us?
+
+ static_assert(sizeof(du) <= PIPE_BUF, "Writes up to PIPE_BUF are guaranteed not to be interleaved and to either fully succeed or fail");
+ ssize_t sent = write(du->rsock, &du, sizeof(du));
+ if (sent != sizeof(du)) {
+ if (errno == EAGAIN || errno == EWOULDBLOCK) {
+ ++g_stats.dohResponsePipeFull;
+ vinfolog("Unable to pass a DoH internal error to the DoH worker thread because the pipe is full");
+ }
+ else {
+ vinfolog("Unable to pass a DoH internal error to the DoH worker thread because we couldn't write to the pipe: %s", stringerror());
+ }
+
+ // XXX but now what - will h2o time this out for us?
+ du->release();
}
}
du->release();
{
DOHUnit *du = nullptr;
DOHServerConfig* dsc = reinterpret_cast<DOHServerConfig*>(listener->data);
- ssize_t got = recv(dsc->dohresponsepair[1], &du, sizeof(du), 0);
+ ssize_t got = read(dsc->dohresponsepair[1], &du, sizeof(du));
if (got < 0) {
warnlog("Error reading a DOH internal response: %s", strerror(errno));
return;
}
- if(!du->req) { // it got killed in flight
+ if (!du->req) { // it got killed in flight
// cout << "du "<<(void*)du<<" came back from dnsdist, but it was killed"<<endl;
du->release();
return;
{
registerOpenSSLUser();
- d_dsc = std::make_shared<DOHServerConfig>(d_idleTimeout);
+ d_dsc = std::make_shared<DOHServerConfig>(d_idleTimeout, d_internalPipeBufferSize);
if (!d_tlsConfig.d_certKeyPairs.empty()) {
try {
dsc->h2o_config.server_name = h2o_iovec_init(df->d_serverTokens.c_str(), df->d_serverTokens.size());
- std::thread dnsdistThread(dnsdistclient, dsc->dohquerypair[1], dsc->dohresponsepair[0]);
+ std::thread dnsdistThread(dnsdistclient, dsc->dohquerypair[1]);
dnsdistThread.detach(); // gets us better error reporting
setThreadName("dnsdist/doh");
HTTPVersionStats d_http1Stats;
HTTPVersionStats d_http2Stats;
+ uint32_t d_internalPipeBufferSize{0};
bool d_sendCacheControlHeaders{true};
bool d_trustForwardedForHeader{false};
std::unordered_set<std::string> d_tags;
std::string d_name;
Priority d_priority{maximumPriority};
+ bool d_policyOverridesGettag{true};
};
struct Policy
return notSet;
}
+ bool policyOverridesGettag() const {
+ if (d_zoneData) {
+ return d_zoneData->d_policyOverridesGettag;
+ }
+ return true;
+ }
+
std::vector<DNSRecord> getCustomRecords(const DNSName& qname, uint16_t qtype) const;
std::vector<DNSRecord> getRecords(const DNSName& qname) const;
{
d_zoneData->d_tags = std::move(tags);
}
+ void setPolicyOverridesGettag(bool flag)
+ {
+ d_zoneData->d_policyOverridesGettag = flag;
+ }
const std::string& getName() const
{
return d_zoneData->d_name;
void setPriority(Priority p) {
d_zoneData->d_priority = p;
}
+
private:
static DNSName maskToRPZ(const Netmask& nm);
static bool findExactNamedPolicy(const std::unordered_map<DNSName, DNSFilterEngine::Policy>& polmap, const DNSName& qname, DNSFilterEngine::Policy& pol);
return RCode::Refused;
}
try {
- db->createSlaveDomain(p.getRemote().toString(), p.qdomain, nameserver, account);
+ db->createSlaveDomain(remote.toString(), p.qdomain, nameserver, account);
if (tsigkeyname.empty() == false) {
vector<string> meta;
meta.push_back(tsigkeyname.toStringNoDot());
[Service]
ExecStart=@sbindir@/pdns_server --guardian=no --daemon=no --disable-syslog --log-timestamp=no --write-pid=no
+SyslogIdentifier=pdns_server
User=@service_user@
Group=@service_group@
Type=notify
sr.setCacheOnly();
}
- if (dc->d_rcode != boost::none) {
- /* we have a response ready to go, most likely from gettag_ffi */
- ret = std::move(dc->d_records);
- res = *dc->d_rcode;
- if (res == RCode::NoError && dc->d_followCNAMERecords) {
- res = followCNAMERecords(ret, QType(dc->d_mdp.d_qtype));
- }
- goto haveAnswer;
- }
-
if (t_pdl) {
t_pdl->prerpz(dq, res);
}
}
}
+ // If we are doing RPZ and a policy was matched, it normally takes precedence over an answer from gettag.
+ // So process the gettag_ffi answer only if no RPZ action was matched or the policy indicates gettag should
+ // have precedence.
+ if (!wantsRPZ || !appliedPolicy.policyOverridesGettag() || appliedPolicy.d_type == DNSFilterEngine::PolicyType::None) {
+ if (dc->d_rcode != boost::none) {
+ /* we have a response ready to go, most likely from gettag_ffi */
+ ret = std::move(dc->d_records);
+ res = *dc->d_rcode;
+ if (res == RCode::NoError && dc->d_followCNAMERecords) {
+ res = followCNAMERecords(ret, QType(dc->d_mdp.d_qtype));
+ }
+ goto haveAnswer;
+ }
+ }
+
// if there is a RecursorLua active, and it 'took' the query in preResolve, we don't launch beginResolve
if (!t_pdl || !t_pdl->preresolve(dq, res)) {
t_allowFrom = g_initialAllowFrom;
t_udpclientsocks = std::unique_ptr<UDPClientSocks>(new UDPClientSocks());
t_tcpClientCounts = std::unique_ptr<tcpClientCounts_t>(new tcpClientCounts_t());
- primeHints();
+ if (threadInfo.isHandler) {
+ primeHints();
+ g_log<<Logger::Warning<<"Done priming cache with root hints"<<endl;
+ }
t_packetCache = std::unique_ptr<RecursorPacketCache>(new RecursorPacketCache());
- g_log<<Logger::Warning<<"Done priming cache with root hints"<<endl;
#ifdef NOD_ENABLED
if (threadInfo.isWorker)
while (!RecursorControlChannel::stop) {
while(MT->schedule(&g_now)); // MTasker letting the mthreads do their thing
- if(!(counter%500)) {
+ // Use primes; this avoids the handler never being scheduled when the counter follows a regular pattern.
+ // We want to call the handler thread often; it gets scheduled about 2 times per second.
+ if ((threadInfo.isHandler && counter % 11 == 0) || counter % 499 == 0) {
MT->makeThread(houseKeeping, 0);
}
cout<<"ID = "<<value.second.id<<" ("<<DNSSECKeeper::keyTypeToString(value.second.keyType)<<")";
}
if (value.first.getKey()->getBits() < 1) {
- cerr<<" <key missing or defunct>" <<endl;
+ cout<<" <key missing or defunct, perhaps you should run pdnsutil hsm create-key>" <<endl;
continue;
}
if (!exportDS) {
return 1;
}
- cerr << "Key of size " << bits << " created" << std::endl;
+ cerr << "Key of size " << dke->getBits() << " created" << std::endl;
return 0;
}
#else
static CK_FUNCTION_LIST** p11_modules;
#endif
+#define ECDSA256_PARAMS "\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07"
+#define ECDSA384_PARAMS "\x06\x05\x2b\x81\x04\x00\x22"
+
// map for signing algorithms
static std::map<unsigned int,CK_MECHANISM_TYPE> dnssec2smech = boost::assign::map_list_of
(5, CKM_SHA1_RSA_PKCS)
(13, CKM_SHA256)
(14, CKM_SHA384);
+static std::map<unsigned int,CK_MECHANISM_TYPE> dnssec2cmech = boost::assign::map_list_of
+(5, CKM_RSA_PKCS_KEY_PAIR_GEN)
+(7, CKM_RSA_PKCS_KEY_PAIR_GEN)
+(8, CKM_RSA_PKCS_KEY_PAIR_GEN)
+(10, CKM_RSA_PKCS_KEY_PAIR_GEN)
+(13, CKM_ECDSA_KEY_PAIR_GEN)
+(14, CKM_ECDSA_KEY_PAIR_GEN);
+
typedef enum { Attribute_Byte, Attribute_Long, Attribute_String } CkaValueType;
// Attribute handling
void logError(const std::string& operation) const {
if (d_err) {
- std::string msg = boost::str( boost::format("PKCS#11 operation %s failed: %s (0x%X)") % operation % p11_kit_strerror(d_err) % d_err );
+ std::string msg = boost::str( boost::format("PKCS#11 operation %s failed: %s (0x%X) (%s)") % operation % p11_kit_strerror(d_err) % d_err % p11_kit_message() );
g_log<<Logger::Error<< msg << endl;
}
}
void logError(const std::string& operation) const {
if (d_err) {
- std::string msg = boost::str( boost::format("PKCS#11 operation %s failed: %s (0x%X)") % operation % p11_kit_strerror(d_err) % d_err );
+ std::string msg = boost::str( boost::format("PKCS#11 operation %s failed: %s (0x%X) (%s)") % operation % p11_kit_strerror(d_err) % d_err % p11_kit_message());
g_log<<Logger::Error<< msg << endl;
}
}
return bits;
#else
- if (d_ecdsa_params == "\x06\x08\x2a\x86\x48\xce\x3d\x03\x01\x07") return 256;
- else if (d_ecdsa_params == "\x06\x05\x2b\x81\x04\x00\x22") return 384;
+ if (d_ecdsa_params == ECDSA256_PARAMS) return 256;
+ else if (d_ecdsa_params == ECDSA384_PARAMS) return 384;
else throw PDNSException("Unsupported EC key");
#endif
}
std::string pubExp("\000\001\000\001", 4); // 65537
- pubAttr.push_back(P11KitAttribute(CKA_CLASS, (unsigned long)CKO_PUBLIC_KEY));
- pubAttr.push_back(P11KitAttribute(CKA_KEY_TYPE, (unsigned long)CKK_RSA));
- pubAttr.push_back(P11KitAttribute(CKA_TOKEN, (char)CK_TRUE));
- pubAttr.push_back(P11KitAttribute(CKA_ENCRYPT, (char)CK_TRUE));
- pubAttr.push_back(P11KitAttribute(CKA_VERIFY, (char)CK_TRUE));
- pubAttr.push_back(P11KitAttribute(CKA_WRAP, (char)CK_TRUE));
- pubAttr.push_back(P11KitAttribute(CKA_MODULUS_BITS, (unsigned long)bits));
- pubAttr.push_back(P11KitAttribute(CKA_PUBLIC_EXPONENT, pubExp));
- pubAttr.push_back(P11KitAttribute(CKA_LABEL, d_pub_label));
-
- privAttr.push_back(P11KitAttribute(CKA_CLASS, (unsigned long)CKO_PRIVATE_KEY));
- privAttr.push_back(P11KitAttribute(CKA_KEY_TYPE, (unsigned long)CKK_RSA));
- privAttr.push_back(P11KitAttribute(CKA_TOKEN, (char)CK_TRUE));
- privAttr.push_back(P11KitAttribute(CKA_PRIVATE, (char)CK_TRUE));
-// privAttr.push_back(P11KitAttribute(CKA_SUBJECT, "CN=keygen"));
- privAttr.push_back(P11KitAttribute(CKA_ID, "\x01\x02\x03\x04")); // this is mandatory if you want to export anything
- privAttr.push_back(P11KitAttribute(CKA_SENSITIVE, (char)CK_TRUE));
- privAttr.push_back(P11KitAttribute(CKA_DECRYPT, (char)CK_TRUE));
- privAttr.push_back(P11KitAttribute(CKA_SIGN, (char)CK_TRUE));
- privAttr.push_back(P11KitAttribute(CKA_UNWRAP, (char)CK_TRUE));
- privAttr.push_back(P11KitAttribute(CKA_LABEL, d_label));
-
- mech.mechanism = CKM_RSA_PKCS_KEY_PAIR_GEN;
+ try {
+ mech.mechanism = dnssec2cmech.at(d_algorithm);
+ } catch (std::out_of_range& e) {
+ throw PDNSException("pkcs11: unsupported algorithm "+std::to_string(d_algorithm)+ " for key pair generation");
+ }
+
mech.pParameter = NULL;
mech.ulParameterLen = 0;
+ if (mech.mechanism == CKM_RSA_PKCS_KEY_PAIR_GEN) {
+ pubAttr.push_back(P11KitAttribute(CKA_CLASS, (unsigned long)CKO_PUBLIC_KEY));
+ pubAttr.push_back(P11KitAttribute(CKA_KEY_TYPE, (unsigned long)CKK_RSA));
+ pubAttr.push_back(P11KitAttribute(CKA_TOKEN, (char)CK_TRUE));
+ pubAttr.push_back(P11KitAttribute(CKA_ENCRYPT, (char)CK_TRUE));
+ pubAttr.push_back(P11KitAttribute(CKA_VERIFY, (char)CK_TRUE));
+ pubAttr.push_back(P11KitAttribute(CKA_WRAP, (char)CK_TRUE));
+ pubAttr.push_back(P11KitAttribute(CKA_MODULUS_BITS, (unsigned long)bits));
+ pubAttr.push_back(P11KitAttribute(CKA_PUBLIC_EXPONENT, pubExp));
+ pubAttr.push_back(P11KitAttribute(CKA_LABEL, d_pub_label));
+
+ privAttr.push_back(P11KitAttribute(CKA_CLASS, (unsigned long)CKO_PRIVATE_KEY));
+ privAttr.push_back(P11KitAttribute(CKA_KEY_TYPE, (unsigned long)CKK_RSA));
+ privAttr.push_back(P11KitAttribute(CKA_TOKEN, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_PRIVATE, (char)CK_TRUE));
+ // privAttr.push_back(P11KitAttribute(CKA_SUBJECT, "CN=keygen"));
+ privAttr.push_back(P11KitAttribute(CKA_ID, "\x01\x02\x03\x04")); // this is mandatory if you want to export anything
+ privAttr.push_back(P11KitAttribute(CKA_SENSITIVE, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_DECRYPT, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_SIGN, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_UNWRAP, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_LABEL, d_label));
+ } else if (mech.mechanism == CKM_ECDSA_KEY_PAIR_GEN) {
+ pubAttr.push_back(P11KitAttribute(CKA_CLASS, (unsigned long)CKO_PUBLIC_KEY));
+ pubAttr.push_back(P11KitAttribute(CKA_KEY_TYPE, (unsigned long)CKK_ECDSA));
+ pubAttr.push_back(P11KitAttribute(CKA_TOKEN, (char)CK_TRUE));
+ pubAttr.push_back(P11KitAttribute(CKA_ENCRYPT, (char)CK_TRUE));
+ pubAttr.push_back(P11KitAttribute(CKA_VERIFY, (char)CK_TRUE));
+ pubAttr.push_back(P11KitAttribute(CKA_WRAP, (char)CK_TRUE));
+ pubAttr.push_back(P11KitAttribute(CKA_LABEL, d_pub_label));
+ if (d_algorithm == 13) pubAttr.push_back(P11KitAttribute(CKA_ECDSA_PARAMS, ECDSA256_PARAMS));
+ else if (d_algorithm == 14) pubAttr.push_back(P11KitAttribute(CKA_ECDSA_PARAMS, ECDSA384_PARAMS));
+ else throw PDNSException("pkcs11: unknown algorithm "+std::to_string(d_algorithm)+" for ECDSA key pair generation");
+
+ privAttr.push_back(P11KitAttribute(CKA_CLASS, (unsigned long)CKO_PRIVATE_KEY));
+ privAttr.push_back(P11KitAttribute(CKA_KEY_TYPE, (unsigned long)CKK_ECDSA));
+ privAttr.push_back(P11KitAttribute(CKA_TOKEN, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_PRIVATE, (char)CK_TRUE));
+ // privAttr.push_back(P11KitAttribute(CKA_SUBJECT, "CN=keygen"));
+ privAttr.push_back(P11KitAttribute(CKA_ID, "\x01\x02\x03\x04")); // this is mandatory if you want to export anything
+ privAttr.push_back(P11KitAttribute(CKA_SENSITIVE, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_DECRYPT, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_SIGN, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_UNWRAP, (char)CK_TRUE));
+ privAttr.push_back(P11KitAttribute(CKA_LABEL, d_label));
+ } else {
+ throw PDNSException("pkcs11: don't know how make key for algorithm "+std::to_string(d_algorithm));
+ }
+
+
if (d_slot->GenerateKeyPair(&mech, pubAttr, privAttr, &pubKey, &privKey)) {
throw PDNSException("Keypair generation failed");
}
typedef std::unordered_map<std::string, boost::variant<bool, uint32_t, std::string, std::vector<std::pair<int, std::string>> > > rpzOptions_t;
-static void parseRPZParameters(rpzOptions_t& have, std::string& polName, boost::optional<DNSFilterEngine::Policy>& defpol, bool& defpolOverrideLocal, uint32_t& maxTTL, size_t& zoneSizeHint, std::unordered_set<std::string>& tags)
+static void parseRPZParameters(rpzOptions_t& have, std::string& polName, boost::optional<DNSFilterEngine::Policy>& defpol, bool& defpolOverrideLocal, uint32_t& maxTTL, size_t& zoneSizeHint, std::unordered_set<std::string>& tags, bool& overridesGettag)
{
if(have.count("policyName")) {
polName = boost::get<std::string>(have["policyName"]);
tags.insert(tag.second);
}
}
+ if (have.count("overridesGettag")) {
+ overridesGettag = boost::get<bool>(have["overridesGettag"]);
+ }
}
#if HAVE_PROTOBUF
std::string polName("rpzFile");
std::shared_ptr<DNSFilterEngine::Zone> zone = std::make_shared<DNSFilterEngine::Zone>();
uint32_t maxTTL = std::numeric_limits<uint32_t>::max();
+ bool overridesGettag = true;
if(options) {
auto& have = *options;
size_t zoneSizeHint = 0;
std::unordered_set<std::string> tags;
- parseRPZParameters(have, polName, defpol, defpolOverrideLocal, maxTTL, zoneSizeHint, tags);
+ parseRPZParameters(have, polName, defpol, defpolOverrideLocal, maxTTL, zoneSizeHint, tags, overridesGettag);
if (zoneSizeHint > 0) {
zone->reserve(zoneSizeHint);
}
}
g_log<<Logger::Warning<<"Loading RPZ from file '"<<filename<<"'"<<endl;
zone->setName(polName);
+ zone->setPolicyOverridesGettag(overridesGettag);
loadRPZFromFile(filename, zone, defpol, defpolOverrideLocal, maxTTL);
lci.dfe.addZone(zone);
g_log<<Logger::Warning<<"Done loading RPZ from file '"<<filename<<"'"<<endl;
auto& have = *options;
size_t zoneSizeHint = 0;
std::unordered_set<std::string> tags;
- parseRPZParameters(have, polName, defpol, defpolOverrideLocal, maxTTL, zoneSizeHint, tags);
+ bool overridesGettag = true;
+ parseRPZParameters(have, polName, defpol, defpolOverrideLocal, maxTTL, zoneSizeHint, tags, overridesGettag);
if (zoneSizeHint > 0) {
zone->reserve(zoneSizeHint);
}
zone->setTags(std::move(tags));
+ zone->setPolicyOverridesGettag(overridesGettag);
if(have.count("tsigname")) {
tt.name=DNSName(toLower(boost::get<string>(have["tsigname"])));
I found a bug!
^^^^^^^^^^^^^^
As much as we'd like to think we are perfect, bugs happen.
-If you have found a bug, please file a bug report on `GitHub <https://github.com/PowerDNS/pdns/issues/new?template=bug_report.md>`_.
+If you have found a bug, please file a `GitHub bug report <https://github.com/PowerDNS/pdns/issues/new?template=bug_report.md>`_.
Please fill in the template and we'll try our best to help you.
I found a security issue!
I have a good idea for a feature!
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We like to work on new things!
-You can file a feature request on `GitHub <https://github.com/PowerDNS/pdns/issues/new?template=feature_request.md>`_.
+You can file a `GitHub feature request <https://github.com/PowerDNS/pdns/issues/new?template=feature_request.md>`_.
List of tags as string, that will be added to the policy tags exported over protobuf when a policy of this zone matches.
+overridesGettag
+^^^^^^^^^^^^^^^
+.. versionadded:: 4.4.0
+
+`gettag_ffi` can set an answer to a query.
+By default, an answer from a matching RPZ policy takes precedence over the `gettag_ffi` answer; set this option to `false` to let the `gettag_ffi` answer take precedence instead.
+The default is `true`.
+
zoneSizeHint
^^^^^^^^^^^^
An indication of the number of expected entries in the zone, speeding up the loading of huge zones by reserving space in advance.
Maximum number of simultaneous TCP clients.
--max-tcp-per-client=<num>
If set, maximum number of TCP sessions per client (IP address).
---query-local-address=<address>[,address...]
+--query-local-address=<address[,address...]>
Use *address* as Source IP address when sending queries.
--quiet
Suppress logging of questions and answers.
Using the Webserver
^^^^^^^^^^^^^^^^^^^
-The :doc:`API <http-api/index>` exposes a statistics endpoint at :http:get:`/api/v1/servers/:server_id/statistics`.
+The :doc:`API <http-api/index>` exposes a statistics endpoint at
+
+.. http:get:: /api/v1/servers/:server_id/statistics
+
This endpoint exports all statistics in a single JSON document.
Using ``rec_control``
qname-min-fallback-success
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. versionadded:: 4.3.0
-number of successful queries due to fallback mechanism within :ref:`qname-minimization` setting.
+
+Number of successful queries answered via the fallback mechanism of the :ref:`setting-qname-minimization` setting.
ecs-queries
^^^^^^^^^^^
The minimum value of this setting is 15. i.e. setting this to lower than 15 will make this value 15.
-.. _setting max-concurrent-requests-per-tcp-connection:
+.. _setting-max-concurrent-requests-per-tcp-connection:
``max-concurrent-requests-per-tcp-connection``
----------------------------------------------
If set, PowerDNS will use distinct threads to listen to client sockets and distribute that work to worker-threads using a hash of the query.
This feature should maximize the cache hit ratio.
-To use more than one thread set `distributor-threads`_setting in version 4.2.0 or newer.
+To use more than one thread, set `distributor-threads` in version 4.2.0 or newer.
Enabling should improve performance for medium sized resolvers.
.. _setting-protobuf-use-kernel-timestamp:
The value between the hooks is a UUID that is generated for each request. This can be used to find all lines related to a single request.
.. note::
- The webserver logs these line on the NOTICE level. The :ref:`settings-loglevel` seting must be 5 or higher for these lines to end up in the log.
+ The webserver logs these lines on the NOTICE level. The :ref:`setting-loglevel` setting must be 5 or higher for these lines to end up in the log.
.. _setting-webserver-password:
New settings
^^^^^^^^^^^^
-- The :ref:`allow-trust-anchor-query` setting has been added. This setting controls if negative trust anchors can be queried. The default is `no`.
-- The :ref:`max-concurrent-requests-per-tcp-connection` has been added. This setting controls how many requests are handled concurrently per incoming TCP connection. The default is 10.
-- The :ref:`max-generate-steps` setting has been added. This sets the maximum number of steps that will be performed when loading a BIND zone with the ``$GENERATE`` directive. The default is 0, which is unlimited.
-- The :ref:`nothing-below-nxdomain` setting has been added. This setting controls the way cached NXDOMAIN replies imply non-existence of a whole subtree. The default is `dnssec` which means that only DNSSEC validated NXDOMAINS results are used.
-- The :ref:`qname-minimization` setting has been added. This options controls if QName Minimization is used. The default is `yes`.
+- The :ref:`setting-allow-trust-anchor-query` setting has been added. This setting controls if negative trust anchors can be queried. The default is `no`.
+- The :ref:`setting-max-concurrent-requests-per-tcp-connection` has been added. This setting controls how many requests are handled concurrently per incoming TCP connection. The default is 10.
+- The :ref:`setting-max-generate-steps` setting has been added. This sets the maximum number of steps that will be performed when loading a BIND zone with the ``$GENERATE`` directive. The default is 0, which is unlimited.
+- The :ref:`setting-nothing-below-nxdomain` setting has been added. This setting controls the way cached NXDOMAIN replies imply non-existence of a whole subtree. The default is `dnssec` which means that only DNSSEC validated NXDOMAINS results are used.
+- The :ref:`setting-qname-minimization` setting has been added. This options controls if QName Minimization is used. The default is `yes`.
4.1.x to 4.2.0
--------------
static bool scanForCNAMELoop(const DNSName& name, const vector<DNSRecord>& records)
{
- for (const auto record: records) {
+ for (const auto& record: records) {
if (record.d_type == QType::CNAME && record.d_place == DNSResourceRecord::ANSWER) {
if (name == record.d_name) {
return true;
DNSSECKeeper dk(&B);
- if (!dk.isSecuredZone(zonename))
- throw ApiException("Zone '" + zonename.toString() + "' is not DNSSEC signed, not rectifying.");
-
- if (di.kind == DomainInfo::Slave)
- throw ApiException("Zone '" + zonename.toString() + "' is a slave zone, not rectifying.");
+ if (dk.isPresigned(zonename))
+ throw ApiException("Zone '" + zonename.toString() + "' is pre-signed, not rectifying.");
string error_msg = "";
string info;
dbrecs = get_db_records(name, 'AAAA')
self.assertIsNone(dbrecs[0]['ordername'])
+    def test_explicit_rectify_success(self):
+        # An explicit PUT .../rectify must set ordername even when the zone was
+        # created with api_rectify=False.
+        # Plain assignment on the next line is deliberate: the chained form
+        # "self.create_zone = self.create_zone(...)" would rebind the helper
+        # method to its return tuple and break later create_zone() calls.
+        name, _, data = self.create_zone(api_rectify=False, dnssec=True, nsec3param='1 0 1 ab')
+        dbrecs = get_db_records(name, 'SOA')
+        self.assertIsNone(dbrecs[0]['ordername'])
+        r = self.session.put(self.url("/api/v1/servers/localhost/zones/" + data['id'] + "/rectify"))
+        self.assertEquals(r.status_code, 200)
+        dbrecs = get_db_records(name, 'SOA')
+        self.assertIsNotNone(dbrecs[0]['ordername'])
+
+    def test_explicit_rectify_slave(self):
+        # Some users want to move a zone to kind=Slave and then rectify, without a re-transfer.
+        # Plain assignment (not "self.create_zone = self.create_zone(...)"):
+        # the chained form would clobber the helper method with its return value.
+        name, _, data = self.create_zone(api_rectify=False, dnssec=True, nsec3param='1 0 1 ab')
+        r = self.session.put(self.url("/api/v1/servers/localhost/zones/" + data['id']),
+                             data=json.dumps({'kind': 'Slave'}),
+                             headers={'content-type': 'application/json'})
+        self.assertEquals(r.status_code, 204)
+        r = self.session.put(self.url("/api/v1/servers/localhost/zones/" + data['id'] + "/rectify"))
+        self.assertEquals(r.status_code, 200)
+        dbrecs = get_db_records(name, 'SOA')
+        self.assertIsNotNone(dbrecs[0]['ordername'])
+
def test_cname_at_ent_place(self):
name, payload, zone = self.create_zone(dnssec=True, api_rectify=True)
rrset = {
'noncompliant-responses', 'rdqueries', 'empty-queries', 'cache-hits',
'cache-misses', 'cpu-iowait', 'cpu-steal', 'cpu-sys-msec', 'cpu-user-msec', 'fd-usage', 'dyn-blocked',
'dyn-block-nmg-size', 'rule-servfail', 'security-status',
- 'udp-in-errors', 'udp-noport-errors', 'udp-recvbuf-errors', 'udp-sndbuf-errors']
+ 'udp-in-errors', 'udp-noport-errors', 'udp-recvbuf-errors', 'udp-sndbuf-errors',
+ 'doh-query-pipe-full', 'doh-response-pipe-full']
for key in expected:
self.assertIn(key, values)
else
# check if PKCS#11 should be used
if [ "$pkcs11" -eq 1 ]; then
- if [ "$slot" == "" ]; then
- slot=0
- else
- slot=$((slot+1))
- fi
- sudo softhsm --init-token --slot $slot --label label$slot --pin 123$slot --so-pin 123$slot
- kid=`$PDNSUTIL --config-dir=. $configname hsm assign $zone ecdsa256 zsk softhsm label$slot 123$slot label$slot 2>&1 | grep softhsm | awk '{ print $NF }'`
- kid=`$PDNSUTIL --config-dir=. $configname show-zone $zone | grep 'ID =.*ZSK' | awk '{ print $3 }'`
- $PDNSUTIL --config-dir=. $configname hsm create-key $zone $kid
- else
- $PDNSUTIL --config-dir=. $configname secure-zone $zone 2>&1
- if [ "${zone: 0:20}" = "cdnskey-cds-test.com" ]; then
- $PDNSUTIL --config-dir=. $configname set-publish-cds $zone 2>&1
- $PDNSUTIL --config-dir=. $configname set-publish-cdnskey $zone 2>&1
- fi
- fi
+ if [ "$slot" == "" ]; then
+ slot=0
+ else
+ slot=$((slot+1))
+ fi
+ label=pdnstest-${EPOCHSECONDS}-${slot}
+ softhsm2-util --delete-token --label $label 2> /dev/null || true
+ softhsm2-util --init-token --label $label --free --pin 1234 --so-pin 1234
+ kid=`$PDNSUTIL --config-dir=. $configname hsm assign $zone ecdsa256 ksk softhsm2 $label 1234 $label 2>&1 | grep softhsm | awk '{ print $NF }'`
+ $PDNSUTIL --config-dir=. $configname hsm create-key $zone $kid
+ $PDNSUTIL --config-dir=. $configname rectify-zone $zone 2>&1
+ else
+ $PDNSUTIL --config-dir=. $configname secure-zone $zone 2>&1
+ fi
+ if [ "${zone: 0:20}" = "cdnskey-cds-test.com" ]; then
+ $PDNSUTIL --config-dir=. $configname set-publish-cds $zone 2>&1
+ $PDNSUTIL --config-dir=. $configname set-publish-cdnskey $zone 2>&1
+ fi
fi
}