/** convert to ldns rr */
static ldns_rr*
to_rr(struct ub_packed_rrset_key* k, struct packed_rrset_data* d,
- uint32_t now, size_t i, uint16_t type)
+ time_t now, size_t i, uint16_t type)
{
ldns_rr* rr = ldns_rr_new();
ldns_rdf* rdf;
/** dump one rrset zonefile line */
static int
dump_rrset_line(SSL* ssl, struct ub_packed_rrset_key* k,
- struct packed_rrset_data* d, uint32_t now, size_t i, uint16_t type)
+ struct packed_rrset_data* d, time_t now, size_t i, uint16_t type)
{
char* s;
ldns_rr* rr = to_rr(k, d, now, i, type);
/** dump rrset key and data info */
static int
dump_rrset(SSL* ssl, struct ub_packed_rrset_key* k,
- struct packed_rrset_data* d, uint32_t now)
+ struct packed_rrset_data* d, time_t now)
{
size_t i;
/* rd lock held by caller */
if(d->ttl < now) return 1; /* expired */
/* meta line */
- if(!ssl_printf(ssl, ";rrset%s %u %u %u %d %d\n",
+ if(!ssl_printf(ssl, ";rrset%s %lld %u %u %d %d\n",
(k->rk.flags & PACKED_RRSET_NSEC_AT_APEX)?" nsec_apex":"",
- (unsigned)(d->ttl - now),
+ (long long)(d->ttl - now),
(unsigned)d->count, (unsigned)d->rrsig_count,
(int)d->trust, (int)d->security
))
/** dump lruhash rrset cache */
static int
-dump_rrset_lruhash(SSL* ssl, struct lruhash* h, uint32_t now)
+dump_rrset_lruhash(SSL* ssl, struct lruhash* h, time_t now)
{
struct lruhash_entry* e;
/* lruhash already locked by caller */
/** dump message entry */
static int
dump_msg(SSL* ssl, struct query_info* k, struct reply_info* d,
- uint32_t now)
+ time_t now)
{
size_t i;
char* nm, *tp, *cl;
}
/* meta line */
- if(!ssl_printf(ssl, "msg %s %s %s %d %d %u %d %u %u %u\n",
+ if(!ssl_printf(ssl, "msg %s %s %s %d %d %lld %d %u %u %u\n",
nm, cl, tp,
(int)d->flags, (int)d->qdcount,
- (unsigned)(d->ttl-now), (int)d->security,
+ (long long)(d->ttl-now), (int)d->security,
(unsigned)d->an_numrrsets,
(unsigned)d->ns_numrrsets,
(unsigned)d->ar_numrrsets)) {
static int
load_rr(SSL* ssl, ldns_buffer* buf, struct regional* region,
struct ub_packed_rrset_key* rk, struct packed_rrset_data* d,
- unsigned int i, int is_rrsig, int* go_on, uint32_t now)
+ unsigned int i, int is_rrsig, int* go_on, time_t now)
{
ldns_rr* rr;
ldns_status status;
return 0;
}
s = sizeof(*ad) + (sizeof(size_t) + sizeof(uint8_t*) +
- sizeof(uint32_t))* num;
+ sizeof(time_t))* num;
for(i=0; i<num; i++)
s += d->rr_len[i];
ad = (struct packed_rrset_data*)malloc(s);
p += sizeof(size_t)*num;
memmove(p, &d->rr_data[0], sizeof(uint8_t*)*num);
p += sizeof(uint8_t*)*num;
- memmove(p, &d->rr_ttl[0], sizeof(uint32_t)*num);
- p += sizeof(uint32_t)*num;
+ memmove(p, &d->rr_ttl[0], sizeof(time_t)*num);
+ p += sizeof(time_t)*num;
for(i=0; i<num; i++) {
memmove(p, d->rr_data[i], d->rr_len[i]);
p += d->rr_len[i];
struct regional* region = worker->scratchpad;
struct ub_packed_rrset_key* rk;
struct packed_rrset_data* d;
- unsigned int ttl, rr_count, rrsig_count, trust, security;
+ unsigned int rr_count, rrsig_count, trust, security;
+ long long ttl;
unsigned int i;
int go_on = 1;
regional_free_all(region);
s += 10;
rk->rk.flags |= PACKED_RRSET_NSEC_AT_APEX;
}
- if(sscanf(s, " %u %u %u %u %u", &ttl, &rr_count, &rrsig_count,
+ if(sscanf(s, " %lld %u %u %u %u", &ttl, &rr_count, &rrsig_count,
&trust, &security) != 5) {
log_warn("error bad rrset spec %s", s);
return 0;
d->rrsig_count = (size_t)rrsig_count;
d->security = (enum sec_status)security;
d->trust = (enum rrset_trust)trust;
- d->ttl = (uint32_t)ttl + *worker->env.now;
+ d->ttl = (time_t)ttl + *worker->env.now;
d->rr_len = regional_alloc_zero(region,
sizeof(size_t)*(d->count+d->rrsig_count));
d->rr_ttl = regional_alloc_zero(region,
- sizeof(uint32_t)*(d->count+d->rrsig_count));
+ sizeof(time_t)*(d->count+d->rrsig_count));
d->rr_data = regional_alloc_zero(region,
sizeof(uint8_t*)*(d->count+d->rrsig_count));
if(!d->rr_len || !d->rr_ttl || !d->rr_data) {
struct query_info qinf;
struct reply_info rep;
char* s = (char*)ldns_buffer_begin(buf);
- unsigned int flags, qdcount, ttl, security, an, ns, ar;
+ unsigned int flags, qdcount, security, an, ns, ar;
+ long long ttl;
size_t i;
int go_on = 1;
}
/* read remainder of line */
- if(sscanf(s, " %u %u %u %u %u %u %u", &flags, &qdcount, &ttl,
+ if(sscanf(s, " %u %u %lld %u %u %u %u", &flags, &qdcount, &ttl,
&security, &an, &ns, &ar) != 7) {
log_warn("error cannot parse numbers: %s", s);
return 0;
}
rep.flags = (uint16_t)flags;
rep.qdcount = (uint16_t)qdcount;
- rep.ttl = (uint32_t)ttl;
+ rep.ttl = (time_t)ttl;
rep.prefetch_ttl = PREFETCH_TTL_CALC(rep.ttl);
rep.security = (enum sec_status)security;
rep.an_numrrsets = (size_t)an;
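/*
 * Illustrative sketch, not part of the patch: time_t has no portable
 * printf/scanf conversion, so the dump code above prints TTLs through a
 * (long long) cast with %lld, and the load code reads them back into a
 * long long before casting to time_t. A minimal, self-contained version of
 * that round trip with hypothetical names:
 */
#include <stdio.h>
#include <time.h>

static void dump_ttl(FILE* out, time_t ttl, time_t now)
{
	/* print remaining TTL; the cast makes %lld correct for any time_t width */
	fprintf(out, "ttl %lld\n", (long long)(ttl - now));
}

static int load_ttl(const char* line, time_t now, time_t* ttl)
{
	long long rel;
	if(sscanf(line, "ttl %lld", &rel) != 1)
		return 0;	/* parse error */
	*ttl = (time_t)rel + now;	/* store as absolute expiry time again */
	return 1;
}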
{
char buf[257];
struct delegpt_addr* a;
- int lame, dlame, rlame, rto, edns_vs, to, delay, entry_ttl,
+ int lame, dlame, rlame, rto, edns_vs, to, delay,
tA = 0, tAAAA = 0, tother = 0;
+ long long entry_ttl;
struct rtt_info ri;
uint8_t edns_lame_known;
for(a = dp->target_list; a; a = a->next_target) {
return;
continue; /* skip stuff not in infra cache */
}
- if(!ssl_printf(ssl, "%s%s%s%srto %d msec, ttl %d, ping %d "
+ if(!ssl_printf(ssl, "%s%s%s%srto %d msec, ttl %lld, ping %d "
"var %d rtt %d, tA %d, tAAAA %d, tother %d",
lame?"LAME ":"", dlame?"NoDNSSEC ":"",
a->lame?"AddrWasParentSide ":"",
/** labels */
int labs;
/** now */
- uint32_t now;
+ time_t now;
/** time to invalidate to */
- uint32_t expired;
+ time_t expired;
/** number of rrsets removed */
size_t num_rrsets;
/** number of msgs removed */
/** the SSL connection */
SSL* ssl;
/** the time now */
- uint32_t now;
+ time_t now;
};
/** callback for every host element in the infra cache */
*/
uint16_t udpsize = edns->udp_size;
int secure = 0;
- uint32_t timenow = *worker->env.now;
+ time_t timenow = *worker->env.now;
int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
&& worker->env.need_to_validate;
struct dns_msg *msg = NULL;
struct reply_info* rep, uint16_t id, uint16_t flags,
struct comm_reply* repinfo, struct edns_data* edns)
{
- uint32_t timenow = *worker->env.now;
+ time_t timenow = *worker->env.now;
uint16_t udpsize = edns->udp_size;
int secure;
int must_validate = (!(flags&BIT_CD) || worker->env.cfg->ignore_cd)
/** Reply to client and perform prefetch to keep cache up to date */
static void
reply_and_prefetch(struct worker* worker, struct query_info* qinfo,
- uint16_t flags, struct comm_reply* repinfo, uint32_t leeway)
+ uint16_t flags, struct comm_reply* repinfo, time_t leeway)
{
/* first send answer to client to keep its latency
* as small as a cachereply */
/* prefetch it if the prefetch TTL expired */
if(worker->env.cfg->prefetch && *worker->env.now >=
((struct reply_info*)e->data)->prefetch_ttl) {
- uint32_t leeway = ((struct reply_info*)e->
+ time_t leeway = ((struct reply_info*)e->
data)->ttl - *worker->env.now;
lock_rw_unlock(&e->lock);
reply_and_prefetch(worker, &qinfo,
+20 Aug 2013: Wouter
+ - Fix for the year 2038 problem: use time_t instead of uint32_t for timestamps and TTLs.
+
19 Aug 2013: Wouter
- Fix#519 ub_ctx_delete may hang in some scenarios (libunbound).
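/*
 * Illustrative sketch, not part of the patch, assuming a platform with a
 * 64-bit time_t: the changelog entry above replaces 32-bit timestamp storage
 * with time_t because a uint32_t truncates an absolute time once it no longer
 * fits in 32 bits, and TTL arithmetic such as (ttl - now) then wraps.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
	time_t now = (time_t)0x100000000LL;	/* hypothetical moment past the 32-bit range */
	uint32_t stored32 = (uint32_t)(now + 300);	/* truncates to 300 */
	time_t stored = now + 300;	/* keeps the full value */

	printf("uint32_t storage: %lld seconds left\n",
		(long long)((time_t)stored32 - now));	/* large negative value */
	printf("time_t storage:   %lld seconds left\n",
		(long long)(stored - now));	/* 300 */
	return 0;
}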
struct ub_packed_rrset_key* k;
struct packed_rrset_data* d;
struct rrset_ref ref;
- uint32_t now = *env->now;
+ time_t now = *env->now;
k = alloc_special_obtain(env->alloc);
if(!k)
*/
static int
iter_filter_unsuitable(struct iter_env* iter_env, struct module_env* env,
- uint8_t* name, size_t namelen, uint16_t qtype, uint32_t now,
+ uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
struct delegpt_addr* a)
{
int rtt, lame, reclame, dnsseclame;
/** lookup RTT information, and also store fastest rtt (if any) */
static int
iter_fill_rtt(struct iter_env* iter_env, struct module_env* env,
- uint8_t* name, size_t namelen, uint16_t qtype, uint32_t now,
+ uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
struct delegpt* dp, int* best_rtt, struct sock_list* blacklist)
{
int got_it = 0;
* returns number of best targets (or 0, no suitable targets) */
static int
iter_filter_order(struct iter_env* iter_env, struct module_env* env,
- uint8_t* name, size_t namelen, uint16_t qtype, uint32_t now,
+ uint8_t* name, size_t namelen, uint16_t qtype, time_t now,
struct delegpt* dp, int* selected_rtt, int open_target,
struct sock_list* blacklist)
{
void
iter_dns_store(struct module_env* env, struct query_info* msgqinf,
- struct reply_info* msgrep, int is_referral, uint32_t leeway, int pside,
+ struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
struct regional* region)
{
if(!dns_cache_store(env, msgqinf, msgrep, is_referral, leeway,
/* TTL: NS from referral in iq->deleg_msg,
* or first RR from iq->response,
* or servfail5secs if !iq->response */
- uint32_t ttl = NORR_TTL;
+ time_t ttl = NORR_TTL;
struct ub_packed_rrset_key* neg;
struct packed_rrset_data* newd;
if(rep) {
neg->entry.hash = rrset_key_hash(&neg->rk);
newd = (struct packed_rrset_data*)regional_alloc_zero(env->scratch,
sizeof(struct packed_rrset_data) + sizeof(size_t) +
- sizeof(uint8_t*) + sizeof(uint32_t) + sizeof(uint16_t));
+ sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t));
if(!newd) {
log_err("out of memory in store_parentside_neg");
return;
* but the query resolution can continue without cache storage.
*/
void iter_dns_store(struct module_env* env, struct query_info* qinf,
- struct reply_info* rep, int is_referral, uint32_t leeway, int pside,
+ struct reply_info* rep, int is_referral, time_t leeway, int pside,
struct regional* region);
/**
* @param region: for qrep allocs.
*/
static void
-store_rrsets(struct module_env* env, struct reply_info* rep, uint32_t now,
- uint32_t leeway, int pside, struct reply_info* qrep,
+store_rrsets(struct module_env* env, struct reply_info* rep, time_t now,
+ time_t leeway, int pside, struct reply_info* qrep,
struct regional* region)
{
size_t i;
void
dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
- hashvalue_t hash, struct reply_info* rep, uint32_t leeway, int pside,
+ hashvalue_t hash, struct reply_info* rep, time_t leeway, int pside,
struct reply_info* qrep, struct regional* region)
{
struct msgreply_entry* e;
- uint32_t ttl = rep->ttl;
+ time_t ttl = rep->ttl;
size_t i;
/* store RRsets */
/** find closest NS or DNAME and returns the rrset (locked) */
static struct ub_packed_rrset_key*
find_closest_of_type(struct module_env* env, uint8_t* qname, size_t qnamelen,
- uint16_t qclass, uint32_t now, uint16_t searchtype, int stripfront)
+ uint16_t qclass, time_t now, uint16_t searchtype, int stripfront)
{
struct ub_packed_rrset_key *rrset;
uint8_t lablen;
/** add addr to additional section */
static void
addr_to_additional(struct ub_packed_rrset_key* rrset, struct regional* region,
- struct dns_msg* msg, uint32_t now)
+ struct dns_msg* msg, time_t now)
{
if((msg->rep->rrsets[msg->rep->rrset_count] =
packed_rrset_copy_region(rrset, region, now))) {
/** lookup message in message cache */
static struct msgreply_entry*
msg_cache_lookup(struct module_env* env, uint8_t* qname, size_t qnamelen,
- uint16_t qtype, uint16_t qclass, uint32_t now, int wr)
+ uint16_t qtype, uint16_t qclass, time_t now, int wr)
{
struct lruhash_entry* e;
struct query_info k;
/** find and add A and AAAA records for nameservers in delegpt */
static int
find_add_addrs(struct module_env* env, uint16_t qclass,
- struct regional* region, struct delegpt* dp, uint32_t now,
+ struct regional* region, struct delegpt* dp, time_t now,
struct dns_msg** msg)
{
struct delegpt_ns* ns;
struct delegpt_ns* ns;
struct msgreply_entry* neg;
struct ub_packed_rrset_key* akey;
- uint32_t now = *env->now;
+ time_t now = *env->now;
for(ns = dp->nslist; ns; ns = ns->next) {
akey = rrset_cache_lookup(env->rrset_cache, ns->name,
ns->namelen, LDNS_RR_TYPE_A, qclass, 0, now, 0);
/** find and add DS or NSEC to delegation msg */
static void
find_add_ds(struct module_env* env, struct regional* region,
- struct dns_msg* msg, struct delegpt* dp, uint32_t now)
+ struct dns_msg* msg, struct delegpt* dp, time_t now)
{
/* Lookup the DS or NSEC at the delegation point. */
struct ub_packed_rrset_key* rrset = rrset_cache_lookup(
int
dns_msg_authadd(struct dns_msg* msg, struct regional* region,
- struct ub_packed_rrset_key* rrset, uint32_t now)
+ struct ub_packed_rrset_key* rrset, time_t now)
{
if(!(msg->rep->rrsets[msg->rep->rrset_count++] =
packed_rrset_copy_region(rrset, region, now)))
struct delegpt*
dns_cache_find_delegation(struct module_env* env, uint8_t* qname,
size_t qnamelen, uint16_t qtype, uint16_t qclass,
- struct regional* region, struct dns_msg** msg, uint32_t now)
+ struct regional* region, struct dns_msg** msg, time_t now)
{
/* try to find closest NS rrset */
struct ub_packed_rrset_key* nskey;
/** generate dns_msg from cached message */
static struct dns_msg*
tomsg(struct module_env* env, struct query_info* q, struct reply_info* r,
- struct regional* region, uint32_t now, struct regional* scratch)
+ struct regional* region, time_t now, struct regional* scratch)
{
struct dns_msg* msg;
size_t i;
/** synthesize RRset-only response from cached RRset item */
static struct dns_msg*
rrset_msg(struct ub_packed_rrset_key* rrset, struct regional* region,
- uint32_t now, struct query_info* q)
+ time_t now, struct query_info* q)
{
struct dns_msg* msg;
struct packed_rrset_data* d = (struct packed_rrset_data*)
/** synthesize DNAME+CNAME response from cached DNAME item */
static struct dns_msg*
synth_dname_msg(struct ub_packed_rrset_key* rrset, struct regional* region,
- uint32_t now, struct query_info* q)
+ time_t now, struct query_info* q)
{
struct dns_msg* msg;
struct ub_packed_rrset_key* ck;
ck->entry.hash = rrset_key_hash(&ck->rk);
newd = (struct packed_rrset_data*)regional_alloc_zero(region,
sizeof(struct packed_rrset_data) + sizeof(size_t) +
- sizeof(uint8_t*) + sizeof(uint32_t) + sizeof(uint16_t)
+ sizeof(uint8_t*) + sizeof(time_t) + sizeof(uint16_t)
+ newlen);
if(!newd)
return NULL;
struct lruhash_entry* e;
struct query_info k;
hashvalue_t h;
- uint32_t now = *env->now;
+ time_t now = *env->now;
struct ub_packed_rrset_key* rrset;
/* lookup first, this has both NXdomains and ANSWER responses */
int
dns_cache_store(struct module_env* env, struct query_info* msgqinf,
- struct reply_info* msgrep, int is_referral, uint32_t leeway, int pside,
+ struct reply_info* msgrep, int is_referral, time_t leeway, int pside,
struct regional* region)
{
struct reply_info* rep = NULL;
* @return 0 on alloc error (out of memory).
*/
int dns_cache_store(struct module_env* env, struct query_info* qinf,
- struct reply_info* rep, int is_referral, uint32_t leeway, int pside,
+ struct reply_info* rep, int is_referral, time_t leeway, int pside,
struct regional* region);
/**
* @param region: to allocate into for qmsg.
*/
void dns_cache_store_msg(struct module_env* env, struct query_info* qinfo,
- hashvalue_t hash, struct reply_info* rep, uint32_t leeway, int pside,
+ hashvalue_t hash, struct reply_info* rep, time_t leeway, int pside,
struct reply_info* qrep, struct regional* region);
/**
*/
struct delegpt* dns_cache_find_delegation(struct module_env* env,
uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
- struct regional* region, struct dns_msg** msg, uint32_t timenow);
+ struct regional* region, struct dns_msg** msg, time_t timenow);
/**
* Find cached message
* @return true if worked, false on fail
*/
int dns_msg_authadd(struct dns_msg* msg, struct regional* region,
- struct ub_packed_rrset_key* rrset, uint32_t now);
+ struct ub_packed_rrset_key* rrset, time_t now);
#endif /* SERVICES_CACHE_DNS_H */
/** init the data elements */
static void
data_entry_init(struct infra_cache* infra, struct lruhash_entry* e,
- uint32_t timenow)
+ time_t timenow)
{
struct infra_data* data = (struct infra_data*)e->data;
data->ttl = timenow + infra->host_ttl;
*/
static struct lruhash_entry*
new_entry(struct infra_cache* infra, struct sockaddr_storage* addr,
- socklen_t addrlen, uint8_t* name, size_t namelen, uint32_t tm)
+ socklen_t addrlen, uint8_t* name, size_t namelen, time_t tm)
{
struct infra_data* data;
struct infra_key* key = (struct infra_key*)malloc(sizeof(*key));
int
infra_host(struct infra_cache* infra, struct sockaddr_storage* addr,
- socklen_t addrlen, uint8_t* nm, size_t nmlen, uint32_t timenow,
+ socklen_t addrlen, uint8_t* nm, size_t nmlen, time_t timenow,
int* edns_vs, uint8_t* edns_lame_known, int* to)
{
struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen,
int
infra_set_lame(struct infra_cache* infra, struct sockaddr_storage* addr,
- socklen_t addrlen, uint8_t* nm, size_t nmlen, uint32_t timenow,
+ socklen_t addrlen, uint8_t* nm, size_t nmlen, time_t timenow,
int dnsseclame, int reclame, uint16_t qtype)
{
struct infra_data* data;
int
infra_rtt_update(struct infra_cache* infra, struct sockaddr_storage* addr,
socklen_t addrlen, uint8_t* nm, size_t nmlen, int qtype,
- int roundtrip, int orig_rtt, uint32_t timenow)
+ int roundtrip, int orig_rtt, time_t timenow)
{
struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen,
nm, nmlen, 1);
return rto;
}
-int infra_get_host_rto(struct infra_cache* infra,
+long long infra_get_host_rto(struct infra_cache* infra,
struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* nm,
- size_t nmlen, struct rtt_info* rtt, int* delay, uint32_t timenow,
+ size_t nmlen, struct rtt_info* rtt, int* delay, time_t timenow,
int* tA, int* tAAAA, int* tother)
{
struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen,
nm, nmlen, 0);
struct infra_data* data;
- int ttl = -2;
+ long long ttl = -2;
if(!e) return -1;
data = (struct infra_data*)e->data;
if(data->ttl >= timenow) {
- ttl = (int)(data->ttl - timenow);
+ ttl = (long long)(data->ttl - timenow);
memmove(rtt, &data->rtt, sizeof(*rtt));
if(timenow < data->probedelay)
*delay = (int)(data->probedelay - timenow);
int
infra_edns_update(struct infra_cache* infra, struct sockaddr_storage* addr,
socklen_t addrlen, uint8_t* nm, size_t nmlen, int edns_version,
- uint32_t timenow)
+ time_t timenow)
{
struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen,
nm, nmlen, 1);
infra_get_lame_rtt(struct infra_cache* infra,
struct sockaddr_storage* addr, socklen_t addrlen,
uint8_t* name, size_t namelen, uint16_t qtype,
- int* lame, int* dnsseclame, int* reclame, int* rtt, uint32_t timenow)
+ int* lame, int* dnsseclame, int* reclame, int* rtt, time_t timenow)
{
struct infra_data* host;
struct lruhash_entry* e = infra_lookup_nottl(infra, addr, addrlen,
*/
struct infra_data {
/** TTL value for this entry. absolute time. */
- uint32_t ttl;
+ time_t ttl;
/** time in seconds (absolute) when probing re-commences, 0 disabled */
- uint32_t probedelay;
+ time_t probedelay;
/** round trip times for timeout calculation */
struct rtt_info rtt;
*/
int infra_host(struct infra_cache* infra, struct sockaddr_storage* addr,
socklen_t addrlen, uint8_t* name, size_t namelen,
- uint32_t timenow, int* edns_vs, uint8_t* edns_lame_known, int* to);
+ time_t timenow, int* edns_vs, uint8_t* edns_lame_known, int* to);
/**
* Set a host to be lame for the given zone.
*/
int infra_set_lame(struct infra_cache* infra,
struct sockaddr_storage* addr, socklen_t addrlen,
- uint8_t* name, size_t namelen, uint32_t timenow, int dnsseclame,
+ uint8_t* name, size_t namelen, time_t timenow, int dnsseclame,
int reclame, uint16_t qtype);
/**
*/
int infra_rtt_update(struct infra_cache* infra, struct sockaddr_storage* addr,
socklen_t addrlen, uint8_t* name, size_t namelen, int qtype,
- int roundtrip, int orig_rtt, uint32_t timenow);
+ int roundtrip, int orig_rtt, time_t timenow);
/**
* Update information for the host, store that a TCP transaction works.
*/
int infra_edns_update(struct infra_cache* infra,
struct sockaddr_storage* addr, socklen_t addrlen,
- uint8_t* name, size_t namelen, int edns_version, uint32_t timenow);
+ uint8_t* name, size_t namelen, int edns_version, time_t timenow);
/**
* Get Lameness information and average RTT if host is in the cache.
int infra_get_lame_rtt(struct infra_cache* infra,
struct sockaddr_storage* addr, socklen_t addrlen,
uint8_t* name, size_t namelen, uint16_t qtype,
- int* lame, int* dnsseclame, int* reclame, int* rtt, uint32_t timenow);
+ int* lame, int* dnsseclame, int* reclame, int* rtt, time_t timenow);
/**
* Get additional (debug) info on timing.
* @return TTL the infra host element is valid for. If -1: not found in cache.
* TTL -2: found but expired.
*/
-int infra_get_host_rto(struct infra_cache* infra,
+long long infra_get_host_rto(struct infra_cache* infra,
struct sockaddr_storage* addr, socklen_t addrlen, uint8_t* name,
- size_t namelen, struct rtt_info* rtt, int* delay, uint32_t timenow,
+ size_t namelen, struct rtt_info* rtt, int* delay, time_t timenow,
int* tA, int* tAAAA, int* tother);
/**
/** see if rrset needs to be updated in the cache */
static int
-need_to_update_rrset(void* nd, void* cd, uint32_t timenow, int equal, int ns)
+need_to_update_rrset(void* nd, void* cd, time_t timenow, int equal, int ns)
{
struct packed_rrset_data* newd = (struct packed_rrset_data*)nd;
struct packed_rrset_data* cached = (struct packed_rrset_data*)cd;
int
rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
- struct alloc_cache* alloc, uint32_t timenow)
+ struct alloc_cache* alloc, time_t timenow)
{
struct lruhash_entry* e;
struct ub_packed_rrset_key* k = ref->key;
struct ub_packed_rrset_key*
rrset_cache_lookup(struct rrset_cache* r, uint8_t* qname, size_t qnamelen,
- uint16_t qtype, uint16_t qclass, uint32_t flags, uint32_t timenow,
+ uint16_t qtype, uint16_t qclass, uint32_t flags, time_t timenow,
int wr)
{
struct lruhash_entry* e;
}
int
-rrset_array_lock(struct rrset_ref* ref, size_t count, uint32_t timenow)
+rrset_array_lock(struct rrset_ref* ref, size_t count, time_t timenow)
{
size_t i;
for(i=0; i<count; i++) {
void
rrset_update_sec_status(struct rrset_cache* r,
- struct ub_packed_rrset_key* rrset, uint32_t now)
+ struct ub_packed_rrset_key* rrset, time_t now)
{
struct packed_rrset_data* updata =
(struct packed_rrset_data*)rrset->entry.data;
void
rrset_check_sec_status(struct rrset_cache* r,
- struct ub_packed_rrset_key* rrset, uint32_t now)
+ struct ub_packed_rrset_key* rrset, time_t now)
{
struct packed_rrset_data* updata =
(struct packed_rrset_data*)rrset->entry.data;
* also the rdata is equal (but other parameters in cache are superior).
*/
int rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
- struct alloc_cache* alloc, uint32_t timenow);
+ struct alloc_cache* alloc, time_t timenow);
/**
* Lookup rrset. You obtain read/write lock. You must unlock before lookup
*/
struct ub_packed_rrset_key* rrset_cache_lookup(struct rrset_cache* r,
uint8_t* qname, size_t qnamelen, uint16_t qtype, uint16_t qclass,
- uint32_t flags, uint32_t timenow, int wr);
+ uint32_t flags, time_t timenow, int wr);
/**
* Obtain readlock on a (sorted) list of rrset references.
* RRsets have been purged from the cache.
* If true, you hold readlocks on all the ref items.
*/
-int rrset_array_lock(struct rrset_ref* ref, size_t count, uint32_t timenow);
+int rrset_array_lock(struct rrset_ref* ref, size_t count, time_t timenow);
/**
* Unlock array (sorted) of rrset references.
* @param now: current time.
*/
void rrset_update_sec_status(struct rrset_cache* r,
- struct ub_packed_rrset_key* rrset, uint32_t now);
+ struct ub_packed_rrset_key* rrset, time_t now);
/**
* Looks up security status of an rrset. Looks up the rrset.
* @param now: current time.
*/
void rrset_check_sec_status(struct rrset_cache* r,
- struct ub_packed_rrset_key* rrset, uint32_t now);
+ struct ub_packed_rrset_key* rrset, time_t now);
/**
* Remove an rrset from the cache, by name and type and flags
/** return name and class and rdata of rr; parses string */
static int
get_rr_content(const char* str, uint8_t** nm, uint16_t* type,
- uint16_t* dclass, uint32_t* ttl, ldns_buffer* rdata)
+ uint16_t* dclass, time_t* ttl, ldns_buffer* rdata)
{
ldns_rr* rr = NULL;
ldns_status status = ldns_rr_new_frm_str(&rr, str, 3600, NULL, NULL);
}
*dclass = ldns_rr_get_class(rr);
*type = ldns_rr_get_type(rr);
- *ttl = (uint32_t)ldns_rr_ttl(rr);
+ *ttl = (time_t)ldns_rr_ttl(rr);
ldns_buffer_clear(rdata);
ldns_buffer_skip(rdata, 2);
status = ldns_rr_rdata2buffer_wire(rdata, rr);
/** insert RR into RRset data structure; Wastes a couple of bytes */
static int
insert_rr(struct regional* region, struct packed_rrset_data* pd,
- ldns_buffer* buf, uint32_t ttl)
+ ldns_buffer* buf, time_t ttl)
{
size_t* oldlen = pd->rr_len;
- uint32_t* oldttl = pd->rr_ttl;
+ time_t* oldttl = pd->rr_ttl;
uint8_t** olddata = pd->rr_data;
/* add RR to rrset */
struct local_rrset* rrset;
struct packed_rrset_data* pd;
uint16_t rrtype = 0, rrclass = 0;
- uint32_t ttl = 0;
+ time_t ttl = 0;
if(!get_rr_content(rrstr, &nm, &rrtype, &rrclass, &ttl, buf)) {
log_err("bad local-data: %s", rrstr);
return 0;
}
void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
- uint16_t qflags, uint32_t leeway)
+ uint16_t qflags, time_t leeway)
{
struct mesh_state* s = mesh_area_find(mesh, qinfo, qflags&BIT_RD, 0);
#ifdef UNBOUND_DEBUG
* @param leeway: TTL leeway what to expire earlier for this update.
*/
void mesh_new_prefetch(struct mesh_area* mesh, struct query_info* qinfo,
- uint16_t qflags, uint32_t leeway);
+ uint16_t qflags, time_t leeway);
/**
* Handle new event from the wire. A serviced query has returned.
{
int rtt, vs;
uint8_t edns_lame_known;
- uint32_t now = *sq->outnet->now_secs;
+ time_t now = *sq->outnet->now_secs;
if(!infra_host(sq->outnet->infra, &sq->addr, sq->addrlen, sq->zone,
sq->zonelen, now, &vs, &edns_lame_known, &rtt))
if(roundtime < TCP_AUTH_QUERY_TIMEOUT*1000) {
if(!infra_rtt_update(sq->outnet->infra, &sq->addr,
sq->addrlen, sq->zone, sq->zonelen, sq->qtype,
- roundtime, sq->last_rtt, (uint32_t)now.tv_sec))
+ roundtime, sq->last_rtt, (time_t)now.tv_sec))
log_err("out of memory noting rtt.");
}
}
sq->retry++;
if(!(rto=infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
sq->zone, sq->zonelen, sq->qtype, -1, sq->last_rtt,
- (uint32_t)now.tv_sec)))
+ (time_t)now.tv_sec)))
log_err("out of memory in UDP exponential backoff");
if(sq->retry < OUTBOUND_UDP_RETRY) {
log_name_addr(VERB_ALGO, "retry query", sq->qbuf+10,
/* only store noEDNS in cache if domain is noDNSSEC */
if(!sq->want_dnssec)
if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
- sq->zone, sq->zonelen, -1, (uint32_t)now.tv_sec)) {
+ sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) {
log_err("Out of memory caching no edns for host");
}
sq->status = serviced_query_UDP;
log_addr(VERB_ALGO, "serviced query: EDNS works for",
&sq->addr, sq->addrlen);
if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
- sq->zone, sq->zonelen, 0, (uint32_t)now.tv_sec)) {
+ sq->zone, sq->zonelen, 0, (time_t)now.tv_sec)) {
log_err("Out of memory caching edns works");
}
sq->edns_lame_known = 1;
log_addr(VERB_ALGO, "serviced query: EDNS fails for",
&sq->addr, sq->addrlen);
if(!infra_edns_update(outnet->infra, &sq->addr, sq->addrlen,
- sq->zone, sq->zonelen, -1, (uint32_t)now.tv_sec)) {
+ sq->zone, sq->zonelen, -1, (time_t)now.tv_sec)) {
log_err("Out of memory caching no edns for host");
}
} else {
if(roundtime < 60000) {
if(!infra_rtt_update(outnet->infra, &sq->addr, sq->addrlen,
sq->zone, sq->zonelen, sq->qtype, roundtime,
- sq->last_rtt, (uint32_t)now.tv_sec))
+ sq->last_rtt, (time_t)now.tv_sec))
log_err("out of memory noting rtt.");
}
}
/** Base for select calls */
struct comm_base* base;
/** pointer to time in seconds */
- uint32_t* now_secs;
+ time_t* now_secs;
/** pointer to time in microseconds */
struct timeval* now_tv;
#endif
}
timeval_add(&runtime->now_tv, &tv);
- runtime->now_secs = (uint32_t)runtime->now_tv.tv_sec;
+ runtime->now_secs = (time_t)runtime->now_tv.tv_sec;
#ifndef S_SPLINT_S
log_info("elapsed %d.%6.6d now %d.%6.6d",
(int)tv.tv_sec, (int)tv.tv_usec,
}
void
-comm_base_timept(struct comm_base* b, uint32_t** tt, struct timeval** tv)
+comm_base_timept(struct comm_base* b, time_t** tt, struct timeval** tv)
{
struct replay_runtime* runtime = (struct replay_runtime*)b;
*tt = &runtime->now_secs;
/* check for functions */
if(strcmp(buf, "time") == 0) {
- snprintf(buf, sizeof(buf), "%u", (unsigned)runtime->now_secs);
+ snprintf(buf, sizeof(buf), "%lld", (long long)runtime->now_secs);
*text += len;
return strdup(buf);
} else if(strcmp(buf, "timeout") == 0) {
- uint32_t res = 0;
+ time_t res = 0;
struct fake_timer* t = first_timer(runtime);
- if(t && (uint32_t)t->tv.tv_sec >= runtime->now_secs)
- res = (uint32_t)t->tv.tv_sec - runtime->now_secs;
- snprintf(buf, sizeof(buf), "%u", (unsigned)res);
+ if(t && (time_t)t->tv.tv_sec >= runtime->now_secs)
+ res = (time_t)t->tv.tv_sec - runtime->now_secs;
+ snprintf(buf, sizeof(buf), "%lld", (long long)res);
*text += len;
return strdup(buf);
} else if(strncmp(buf, "ctime ", 6) == 0 ||
struct infra_cache* infra;
/** the current time in seconds */
- uint32_t now_secs;
+ time_t now_secs;
/** the current time in microseconds */
struct timeval now_tv;
/** performance test message encoding */
static void
perf_encode(struct query_info* qi, struct reply_info* rep, uint16_t id,
- uint16_t flags, ldns_buffer* out, uint32_t timenow,
+ uint16_t flags, ldns_buffer* out, time_t timenow,
struct edns_data* edns)
{
static int num = 0;
int ret;
uint16_t id;
uint16_t flags;
- uint32_t timenow = 0;
+ time_t timenow = 0;
struct regional* region = regional_create();
struct edns_data edns;
struct packed_rrset_data rd;
struct ub_packed_rrset_key nsec;
size_t rr_len;
- uint32_t rr_ttl;
+ time_t rr_ttl;
uint8_t* rr_data;
char* zname = get_random_zone();
char* from, *to;
struct entry* list = read_datafile(fname, 1);
struct module_env env;
struct val_env ve;
- uint32_t now = time(NULL);
+ time_t now = time(NULL);
if(!list)
fatal_exit("could not read %s: %s", fname, strerror(errno));
else S_YNO("prefetch:", prefetch)
else S_YNO("prefetch-key:", prefetch_key)
else if(strcmp(opt, "cache-max-ttl:") == 0)
- { IS_NUMBER_OR_ZERO; cfg->max_ttl = atoi(val); MAX_TTL=cfg->max_ttl;}
+ { IS_NUMBER_OR_ZERO; cfg->max_ttl = atoi(val); MAX_TTL=(time_t)cfg->max_ttl;}
else if(strcmp(opt, "cache-min-ttl:") == 0)
- { IS_NUMBER_OR_ZERO; cfg->min_ttl = atoi(val); MIN_TTL=cfg->min_ttl;}
+ { IS_NUMBER_OR_ZERO; cfg->min_ttl = atoi(val); MIN_TTL=(time_t)cfg->min_ttl;}
else S_NUMBER_OR_ZERO("infra-host-ttl:", host_ttl)
else S_POW2("infra-cache-slabs:", infra_cache_slabs)
else S_SIZET_NONZERO("infra-cache-numhosts:", infra_cache_numhosts)
else S_STR("python-script:", python_script)
/* val_sig_skew_min and max are copied into val_env during init,
* so this does not update val_env with set_option */
- else S_NUMBER_OR_ZERO("val-sig-skew-min:", val_sig_skew_min)
- else S_NUMBER_OR_ZERO("val-sig-skew-max:", val_sig_skew_max)
+ else if(strcmp(opt, "val-sig-skew-min:") == 0)
+ { IS_NUMBER_OR_ZERO; cfg->val_sig_skew_min = (int32_t)atoi(val); }
+ else if(strcmp(opt, "val-sig-skew-max:") == 0)
+ { IS_NUMBER_OR_ZERO; cfg->val_sig_skew_max = (int32_t)atoi(val); }
else if (strcmp(opt, "outgoing-interface:") == 0) {
char* d = strdup(val);
char** oi = (char**)malloc((cfg->num_out_ifs+1)*sizeof(char*));
return 1;
}
-uint32_t
+time_t
cfg_convert_timeval(const char* str)
{
- uint32_t t;
+ time_t t;
struct tm tm;
memset(&tm, 0, sizeof(tm));
if(strlen(str) < 14)
* @param str: string of 14 digits
* @return time value or 0 for error.
*/
-uint32_t cfg_convert_timeval(const char* str);
+time_t cfg_convert_timeval(const char* str);
/**
* Count number of values in the string.
/** store rrset in buffer in wireformat, return RETVAL_* */
static int
packed_rrset_encode(struct ub_packed_rrset_key* key, ldns_buffer* pkt,
- uint16_t* num_rrs, uint32_t timenow, struct regional* region,
+ uint16_t* num_rrs, time_t timenow, struct regional* region,
int do_data, int do_sig, struct compress_tree_node** tree,
ldns_pkt_section s, uint16_t qtype, int dnssec, size_t rr_offset)
{
/** store msg section in wireformat buffer, return RETVAL_* */
static int
insert_section(struct reply_info* rep, size_t num_rrsets, uint16_t* num_rrs,
- ldns_buffer* pkt, size_t rrsets_before, uint32_t timenow,
+ ldns_buffer* pkt, size_t rrsets_before, time_t timenow,
struct regional* region, struct compress_tree_node** tree,
ldns_pkt_section s, uint16_t qtype, int dnssec, size_t rr_offset)
{
int
reply_info_encode(struct query_info* qinfo, struct reply_info* rep,
- uint16_t id, uint16_t flags, ldns_buffer* buffer, uint32_t timenow,
+ uint16_t id, uint16_t flags, ldns_buffer* buffer, time_t timenow,
struct regional* region, uint16_t udpsize, int dnssec)
{
uint16_t ancount=0, nscount=0, arcount=0;
int
reply_info_answer_encode(struct query_info* qinf, struct reply_info* rep,
- uint16_t id, uint16_t qflags, ldns_buffer* pkt, uint32_t timenow,
+ uint16_t id, uint16_t qflags, ldns_buffer* pkt, time_t timenow,
int cached, struct regional* region, uint16_t udpsize,
struct edns_data* edns, int dnssec, int secure)
{
* @return: 0 on error (server failure).
*/
int reply_info_answer_encode(struct query_info* qinf, struct reply_info* rep,
- uint16_t id, uint16_t qflags, ldns_buffer* dest, uint32_t timenow,
+ uint16_t id, uint16_t qflags, ldns_buffer* dest, time_t timenow,
int cached, struct regional* region, uint16_t udpsize,
struct edns_data* edns, int dnssec, int secure);
* 0 on error: malloc failure (no log_err has been done).
*/
int reply_info_encode(struct query_info* qinfo, struct reply_info* rep,
- uint16_t id, uint16_t flags, ldns_buffer* buffer, uint32_t timenow,
+ uint16_t id, uint16_t flags, ldns_buffer* buffer, time_t timenow,
struct regional* region, uint16_t udpsize, int dnssec);
/**
/** constructor for replyinfo */
static struct reply_info*
construct_reply_info_base(struct regional* region, uint16_t flags, size_t qd,
- uint32_t ttl, uint32_t prettl, size_t an, size_t ns, size_t ar,
+ time_t ttl, time_t prettl, size_t an, size_t ns, size_t ar,
size_t total, enum sec_status sec)
{
struct reply_info* rep;
/** do the rdata copy */
static int
rdata_copy(ldns_buffer* pkt, struct packed_rrset_data* data, uint8_t* to,
- struct rr_parse* rr, uint32_t* rr_ttl, uint16_t type)
+ struct rr_parse* rr, time_t* rr_ttl, uint16_t type)
{
uint16_t pkt_len;
const ldns_rr_descriptor* desc;
data->rr_len = (size_t*)((uint8_t*)data +
sizeof(struct packed_rrset_data));
data->rr_data = (uint8_t**)&(data->rr_len[total]);
- data->rr_ttl = (uint32_t*)&(data->rr_data[total]);
+ data->rr_ttl = (time_t*)&(data->rr_data[total]);
nextrdata = (uint8_t*)&(data->rr_ttl[total]);
for(i=0; i<data->count; i++) {
data->rr_len[i] = rr->size;
/* allocate */
size_t s = sizeof(struct packed_rrset_data) +
(pset->rr_count + pset->rrsig_count) *
- (sizeof(size_t)+sizeof(uint8_t*)+sizeof(uint32_t)) +
+ (sizeof(size_t)+sizeof(uint8_t*)+sizeof(time_t)) +
pset->size;
if(region)
*data = regional_alloc(region, s);
}
void
-reply_info_set_ttls(struct reply_info* rep, uint32_t timenow)
+reply_info_set_ttls(struct reply_info* rep, time_t timenow)
{
size_t i, j;
rep->ttl += timenow;
*/
uint8_t qdcount;
+ /** 32 bit padding to pad struct member alignment to 64 bits. */
+ uint32_t padding;
+
/**
* TTL of the entire reply (for negative caching).
* only for use when there are 0 RRsets in this message.
* if there are RRsets, check those instead.
*/
- uint32_t ttl;
+ time_t ttl;
/**
* TTL for prefetch. After it has expired, a prefetch is suitable.
* Smaller than the TTL, otherwise the prefetch would not happen.
*/
- uint32_t prefetch_ttl;
-
- /** 32 bit padding to pad struct member alignment to 64 bits. */
- uint32_t padding;
+ time_t prefetch_ttl;
/**
* The security status from DNSSEC validation of this message.
* Also refs must be filled in.
* @param timenow: the current time.
*/
-void reply_info_set_ttls(struct reply_info* rep, uint32_t timenow);
+void reply_info_set_ttls(struct reply_info* rep, time_t timenow);
/**
* Delete reply_info and packed_rrsets (while they are not yet added to the
data->rr_len = (size_t*)((uint8_t*)data +
sizeof(struct packed_rrset_data));
data->rr_data = (uint8_t**)&(data->rr_len[total]);
- data->rr_ttl = (uint32_t*)&(data->rr_data[total]);
+ data->rr_ttl = (time_t*)&(data->rr_data[total]);
nextrdata = (uint8_t*)&(data->rr_ttl[total]);
for(i=0; i<total; i++) {
data->rr_data[i] = nextrdata;
}
void
-packed_rrset_ttl_add(struct packed_rrset_data* data, uint32_t add)
+packed_rrset_ttl_add(struct packed_rrset_data* data, time_t add)
{
size_t i;
size_t total = data->count + data->rrsig_count;
ntohs(rrset->rk.type), ntohs(rrset->rk.rrset_class));
}
-uint32_t
+time_t
ub_packed_rrset_ttl(struct ub_packed_rrset_key* key)
{
struct packed_rrset_data* d = (struct packed_rrset_data*)key->
struct ub_packed_rrset_key*
packed_rrset_copy_region(struct ub_packed_rrset_key* key,
- struct regional* region, uint32_t now)
+ struct regional* region, time_t now)
{
struct ub_packed_rrset_key* ck = regional_alloc(region,
sizeof(struct ub_packed_rrset_key));
struct ub_packed_rrset_key*
packed_rrset_copy_alloc(struct ub_packed_rrset_key* key,
- struct alloc_cache* alloc, uint32_t now)
+ struct alloc_cache* alloc, time_t now)
{
struct packed_rrset_data* fd, *dd;
struct ub_packed_rrset_key* dk = alloc_special_obtain(alloc);
/* allocate */
total = count + rrsig_count;
- len += sizeof(*data) + total*(sizeof(size_t) + sizeof(uint32_t) +
+ len += sizeof(*data) + total*(sizeof(size_t) + sizeof(time_t) +
sizeof(uint8_t*));
data = (struct packed_rrset_data*)calloc(1, len);
if(!data)
data->rr_len = (size_t*)((uint8_t*)data +
sizeof(struct packed_rrset_data));
data->rr_data = (uint8_t**)&(data->rr_len[total]);
- data->rr_ttl = (uint32_t*)&(data->rr_data[total]);
+ data->rr_ttl = (time_t*)&(data->rr_data[total]);
nextrdata = (uint8_t*)&(data->rr_ttl[total]);
/* fill out len, ttl, fields */
struct packed_rrset_data {
/** TTL (in seconds like time()) of the rrset.
* Same for all RRs see rfc2181(5.2). */
- uint32_t ttl;
+ time_t ttl;
/** number of rrs. */
size_t count;
/** number of rrsigs, if 0 no rrsigs */
/** length of every rr's rdata, rr_len[i] is size of rr_data[i]. */
size_t* rr_len;
/** ttl of every rr. rr_ttl[i] ttl of rr i. */
- uint32_t *rr_ttl;
+ time_t *rr_ttl;
/**
* Array of pointers to every rr's rdata.
* The rr_data[i] rdata is stored in uncompressed wireformat.
* @param key: rrset key, with data to examine.
* @return ttl value.
*/
-uint32_t ub_packed_rrset_ttl(struct ub_packed_rrset_key* key);
+time_t ub_packed_rrset_ttl(struct ub_packed_rrset_key* key);
/**
* Calculate memory size of rrset entry. For hash table usage.
* @param data: rrset data structure. Otherwise correctly filled in.
* @param add: how many seconds to add, pass time(0) for example.
*/
-void packed_rrset_ttl_add(struct packed_rrset_data* data, uint32_t add);
+void packed_rrset_ttl_add(struct packed_rrset_data* data, time_t add);
/**
* Utility procedure to extract CNAME target name from its rdata.
*/
struct ub_packed_rrset_key* packed_rrset_copy_region(
struct ub_packed_rrset_key* key, struct regional* region,
- uint32_t now);
+ time_t now);
/**
* Allocate rrset with malloc (from region or you are holding the lock).
*/
struct ub_packed_rrset_key* packed_rrset_copy_alloc(
struct ub_packed_rrset_key* key, struct alloc_cache* alloc,
- uint32_t now);
+ time_t now);
/**
* Create a ub_packed_rrset_key allocated on the heap.
static int logging_to_syslog = 0;
#endif /* HAVE_SYSLOG_H */
/** time to print in log, if NULL, use time(2) */
-static uint32_t* log_now = NULL;
+static time_t* log_now = NULL;
/** print time in UTC or in secondsfrom1970 */
static int log_time_asc = 0;
ident = id;
}
-void log_set_time(uint32_t* t)
+void log_set_time(time_t* t)
{
log_now = t;
}
* @param t: the point is copied and used to find the time.
* if NULL, time(2) is used.
*/
-void log_set_time(uint32_t* t);
+void log_set_time(time_t* t);
/**
* Set if the time value is printed ascii or decimal in log entries.
return -1;
}
#ifndef S_SPLINT_S
- *base->time_secs = (uint32_t)base->time_tv->tv_sec;
+ *base->time_secs = (time_t)base->time_tv->tv_sec;
#endif
return 0;
}
/** create event base */
-void *event_init(uint32_t* time_secs, struct timeval* time_tv)
+void *event_init(time_t* time_secs, struct timeval* time_tv)
{
struct event_base* base = (struct event_base*)malloc(
sizeof(struct event_base));
/** if we need to exit */
int need_to_exit;
/** where to store time in seconds */
- uint32_t* time_secs;
+ time_t* time_secs;
/** where to store time in microseconds */
struct timeval* time_tv;
};
/* function prototypes (some are as they appear in event.h) */
/** create event base */
-void *event_init(uint32_t* time_secs, struct timeval* time_tv);
+void *event_init(time_t* time_secs, struct timeval* time_tv);
/** get version */
const char *event_get_version(void);
/** get polling method, select */
/** random table to generate random numbers */
struct ub_randstate* rnd;
/** time in seconds, converted to integer */
- uint32_t* now;
+ time_t* now;
/** time in microseconds. Relatively recent. */
struct timeval* now_tv;
/** is validation required for messages, controls client-facing
/** mesh related information for this query */
struct mesh_state* mesh_info;
/** how many seconds before expiry is this prefetched (0 if not) */
- uint32_t prefetch_leeway;
+ time_t prefetch_leeway;
};
/**
/** libevent event_base type. */
struct event_base* base;
/** seconds time pointer points here */
- uint32_t secs;
+ time_t secs;
/** timeval with current time */
struct timeval now;
/** the event used for slow_accept timeouts */
if(gettimeofday(&b->eb->now, NULL) < 0) {
log_err("gettimeofday: %s", strerror(errno));
}
- b->eb->secs = (uint32_t)b->eb->now.tv_sec;
+ b->eb->secs = (time_t)b->eb->now.tv_sec;
}
#endif /* USE_MINI_EVENT */
}
void
-comm_base_timept(struct comm_base* b, uint32_t** tt, struct timeval** tv)
+comm_base_timept(struct comm_base* b, time_t** tt, struct timeval** tv)
{
*tt = &b->eb->secs;
*tv = &b->eb->now;
* @param tt: pointer to time in seconds is returned.
* @param tv: pointer to time in microseconds is returned.
*/
-void comm_base_timept(struct comm_base* b, uint32_t** tt, struct timeval** tv);
+void comm_base_timept(struct comm_base* b, time_t** tt, struct timeval** tv);
/**
* Dispatch the comm base events.
return -1;
}
#ifndef S_SPLINT_S
- *base->time_secs = (uint32_t)base->time_tv->tv_sec;
+ *base->time_secs = (time_t)base->time_tv->tv_sec;
#endif
return 0;
}
}
}
-void *event_init(uint32_t* time_secs, struct timeval* time_tv)
+void *event_init(time_t* time_secs, struct timeval* time_tv)
{
struct event_base* base = (struct event_base*)malloc(
sizeof(struct event_base));
/** if we need to exit */
int need_to_exit;
/** where to store time in seconds */
- uint32_t* time_secs;
+ time_t* time_secs;
/** where to store time in microseconds */
struct timeval* time_tv;
/**
};
/** create event base */
-void *event_init(uint32_t* time_secs, struct timeval* time_tv);
+void *event_init(time_t* time_secs, struct timeval* time_tv);
/** get version */
const char *event_get_version(void);
/** get polling method (select,epoll) */
if (pos < 0 || !timestamp)
ta->last_change = 0;
else
- ta->last_change = (uint32_t)timestamp;
+ ta->last_change = (time_t)timestamp;
free(comment);
return 1;
} else if(strncmp(line, ";;query_interval: ", 18) == 0) {
if(!tp) return -1;
lock_basic_lock(&tp->lock);
- tp->autr->query_interval = (uint32_t)parse_int(line+18, &r);
+ tp->autr->query_interval = (time_t)parse_int(line+18, &r);
lock_basic_unlock(&tp->lock);
} else if(strncmp(line, ";;retry_time: ", 14) == 0) {
if(!tp) return -1;
lock_basic_lock(&tp->lock);
- tp->autr->retry_time = (uint32_t)parse_int(line+14, &r);
+ tp->autr->retry_time = (time_t)parse_int(line+14, &r);
lock_basic_unlock(&tp->lock);
}
return r;
}
/** Find minimum expiration interval from signatures */
-static uint32_t
+static time_t
min_expiry(struct module_env* env, ldns_rr_list* rrset)
{
size_t i;
- uint32_t t, r = 15 * 24 * 3600; /* 15 days max */
+ int32_t t, r = 15 * 24 * 3600; /* 15 days max */
for(i=0; i<ldns_rr_list_rr_count(rrset); i++) {
ldns_rr* rr = ldns_rr_list_rr(rrset, i);
if(ldns_rr_get_type(rr) != LDNS_RR_TYPE_RRSIG)
continue;
t = ldns_rdf2native_int32(ldns_rr_rrsig_expiration(rr));
- if(t - *env->now > 0) {
+ if((int32_t)t - (int32_t)*env->now > 0) {
t -= *env->now;
if(t < r)
r = t;
}
}
- return r;
+ return (time_t)r;
}
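/*
 * Illustrative note, not part of the patch: the RRSIG expiration field is a
 * 32-bit timestamp compared with serial-number style arithmetic, which appears
 * to be why min_expiry above keeps the differences in int32_t and only converts
 * the final result to time_t. A self-contained sketch of that comparison idiom:
 */
#include <stdint.h>
#include <stdio.h>

/* "a is later than b" for wrapping 32-bit timestamps, the same idea as
 * (int32_t)t - (int32_t)*env->now > 0 in the function above */
static int later32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", later32(10, 5));		/* 1 */
	printf("%d\n", later32(5, 0xfffffff0u));	/* 1: counter wrapped, 5 is still later */
	return 0;
}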
/** Is rr self-signed revoked key */
}
/** get TTL from DNSKEY rrset */
-static uint32_t
+static time_t
key_ttl(struct ub_packed_rrset_key* k)
{
struct packed_rrset_data* d = (struct packed_rrset_data*)k->entry.data;
/** update the time values for the trustpoint */
static void
-set_tp_times(struct trust_anchor* tp, uint32_t rrsig_exp_interval,
- uint32_t origttl, int* changed)
+set_tp_times(struct trust_anchor* tp, time_t rrsig_exp_interval,
+ time_t origttl, int* changed)
{
- uint32_t x, qi = tp->autr->query_interval, rt = tp->autr->retry_time;
+ time_t x, qi = tp->autr->query_interval, rt = tp->autr->retry_time;
/* x = MIN(15days, ttl/2, expire/2) */
x = 15 * 24 * 3600;
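/*
 * Illustrative sketch, not part of the patch, of the computation described in
 * the comment above: x = MIN(15 days, ttl/2, expire/2), written as plain
 * time_t arithmetic with hypothetical parameter names.
 */
#include <time.h>

static time_t probe_interval(time_t origttl, time_t rrsig_exp_interval)
{
	time_t x = 15 * 24 * 3600;	/* 15 days, the upper bound */
	if(origttl/2 < x)
		x = origttl/2;	/* half the DNSKEY TTL */
	if(rrsig_exp_interval/2 < x)
		x = rrsig_exp_interval/2;	/* half the signature expiry interval */
	return x;
}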
/** calculate next probe time */
static time_t
-calc_next_probe(struct module_env* env, uint32_t wait)
+calc_next_probe(struct module_env* env, time_t wait)
{
/* make it random, 90-100% */
- uint32_t rnd, rest;
+ time_t rnd, rest;
if(wait < 3600)
wait = 3600;
rnd = wait/10;
rest = wait-rnd;
- rnd = (uint32_t)ub_random_max(env->rnd, (long int)rnd);
+ rnd = (time_t)ub_random_max(env->rnd, (long int)rnd);
return (time_t)(*env->now + rest + rnd);
}
{
struct timeval tv;
#ifndef S_SPLINT_S
- uint32_t next = (uint32_t)wait_probe_time(env->anchors);
+ time_t next = (time_t)wait_probe_time(env->anchors);
/* in case this is libunbound, no timer */
if(!env->probe_timer)
return;
/** fetch first to-probe trust-anchor and lock it and set retrytime */
static struct trust_anchor*
-todo_probe(struct module_env* env, uint32_t* next)
+todo_probe(struct module_env* env, time_t* next)
{
struct trust_anchor* tp;
rbnode_t* el;
lock_basic_lock(&tp->lock);
/* is it eligible? */
- if((uint32_t)tp->autr->next_probe_time > *env->now) {
+ if((time_t)tp->autr->next_probe_time > *env->now) {
/* no more to probe */
- *next = (uint32_t)tp->autr->next_probe_time - *env->now;
+ *next = (time_t)tp->autr->next_probe_time - *env->now;
lock_basic_unlock(&tp->lock);
lock_basic_unlock(&env->anchors->lock);
return NULL;
return tp;
}
-uint32_t
+time_t
autr_probe_timer(struct module_env* env)
{
struct trust_anchor* tp;
- uint32_t next_probe = 3600;
+ time_t next_probe = 3600;
int num = 0;
verbose(VERB_ALGO, "autotrust probe timer callback");
/* while there are still anchors to probe */
time_t next_probe_time;
/** when to query if !failed */
- uint32_t query_interval;
+ time_t query_interval;
/** when to retry if failed */
- uint32_t retry_time;
+ time_t retry_time;
/**
* How many times did it fail. diagnostic only (has no effect).
* @return time of next probe (in seconds from now).
* If 0, then there is no next probe anymore (trust points deleted).
*/
-uint32_t autr_probe_timer(struct module_env* env);
+time_t autr_probe_timer(struct module_env* env);
/** probe tree compare function */
int probetree_cmp(const void* x, const void* y);
free(pkey);
return NULL;
}
- pd->rr_ttl = (uint32_t*)malloc(num*sizeof(uint32_t));
+ pd->rr_ttl = (time_t*)malloc(num*sizeof(time_t));
if(!pd->rr_ttl) {
free(pd->rr_len);
free(pd);
struct key_entry_key*
key_cache_obtain(struct key_cache* kcache, uint8_t* name, size_t namelen,
- uint16_t key_class, struct regional* region, uint32_t now)
+ uint16_t key_class, struct regional* region, time_t now)
{
/* keep looking until we find a nonexpired entry */
while(1) {
*/
struct key_entry_key* key_cache_obtain(struct key_cache* kcache,
uint8_t* name, size_t namelen, uint16_t key_class,
- struct regional* region, uint32_t now);
+ struct regional* region, time_t now);
/**
* Get memory in use by the key cache.
struct key_entry_key*
key_entry_create_null(struct regional* region,
- uint8_t* name, size_t namelen, uint16_t dclass, uint32_t ttl,
- uint32_t now)
+ uint8_t* name, size_t namelen, uint16_t dclass, time_t ttl,
+ time_t now)
{
struct key_entry_key* k;
struct key_entry_data* d;
struct key_entry_key*
key_entry_create_rrset(struct regional* region,
uint8_t* name, size_t namelen, uint16_t dclass,
- struct ub_packed_rrset_key* rrset, uint8_t* sigalg, uint32_t now)
+ struct ub_packed_rrset_key* rrset, uint8_t* sigalg, time_t now)
{
struct key_entry_key* k;
struct key_entry_data* d;
struct key_entry_key*
key_entry_create_bad(struct regional* region,
- uint8_t* name, size_t namelen, uint16_t dclass, uint32_t ttl,
- uint32_t now)
+ uint8_t* name, size_t namelen, uint16_t dclass, time_t ttl,
+ time_t now)
{
struct key_entry_key* k;
struct key_entry_data* d;
*/
struct key_entry_data {
/** the TTL of this entry (absolute time) */
- uint32_t ttl;
+ time_t ttl;
/** the key rrdata. can be NULL to signal keyless name. */
struct packed_rrset_data* rrset_data;
/** not NULL sometimes to give reason why bogus */
* @return new key entry or NULL on alloc failure
*/
struct key_entry_key* key_entry_create_null(struct regional* region,
- uint8_t* name, size_t namelen, uint16_t dclass, uint32_t ttl,
- uint32_t now);
+ uint8_t* name, size_t namelen, uint16_t dclass, time_t ttl,
+ time_t now);
/**
* Create a key entry from an rrset, in the given region.
*/
struct key_entry_key* key_entry_create_rrset(struct regional* region,
uint8_t* name, size_t namelen, uint16_t dclass,
- struct ub_packed_rrset_key* rrset, uint8_t* sigalg, uint32_t now);
+ struct ub_packed_rrset_key* rrset, uint8_t* sigalg, time_t now);
/**
* Create a bad entry, in the given region.
* @return new key entry or NULL on alloc failure
*/
struct key_entry_key* key_entry_create_bad(struct regional* region,
- uint8_t* name, size_t namelen, uint16_t dclass, uint32_t ttl,
- uint32_t now);
+ uint8_t* name, size_t namelen, uint16_t dclass, time_t ttl,
+ time_t now);
/**
* Obtain rrset from a key entry, allocated in region.
}
int val_neg_dlvlookup(struct val_neg_cache* neg, uint8_t* qname, size_t len,
- uint16_t qclass, struct rrset_cache* rrset_cache, uint32_t now)
+ uint16_t qclass, struct rrset_cache* rrset_cache, time_t now)
{
/* lookup closest zone */
struct val_neg_zone* zone;
grab_nsec(struct rrset_cache* rrset_cache, uint8_t* qname, size_t qname_len,
uint16_t qtype, uint16_t qclass, uint32_t flags,
struct regional* region, int checkbit, uint16_t checktype,
- uint32_t now)
+ time_t now)
{
struct ub_packed_rrset_key* r, *k = rrset_cache_lookup(rrset_cache,
qname, qname_len, qtype, qclass, flags, now, 0);
static struct ub_packed_rrset_key*
neg_nsec3_getnc(struct val_neg_zone* zone, uint8_t* hashnc, size_t nclen,
struct rrset_cache* rrset_cache, struct regional* region,
- uint32_t now, uint8_t* b32, size_t maxb32)
+ time_t now, uint8_t* b32, size_t maxb32)
{
struct ub_packed_rrset_key* nc_rrset;
struct val_neg_data* data;
static struct dns_msg*
neg_nsec3_proof_ds(struct val_neg_zone* zone, uint8_t* qname, size_t qname_len,
int qlabs, ldns_buffer* buf, struct rrset_cache* rrset_cache,
- struct regional* region, uint32_t now, uint8_t* topname)
+ struct regional* region, time_t now, uint8_t* topname)
{
struct dns_msg* msg;
struct val_neg_data* data;
* @param zone: val_neg_zone if we have one.
* @return false on lookup or alloc failure.
*/
-static int add_soa(struct rrset_cache* rrset_cache, uint32_t now,
+static int add_soa(struct rrset_cache* rrset_cache, time_t now,
struct regional* region, struct dns_msg* msg, struct val_neg_zone* zone)
{
struct ub_packed_rrset_key* soa;
struct dns_msg*
val_neg_getmsg(struct val_neg_cache* neg, struct query_info* qinfo,
struct regional* region, struct rrset_cache* rrset_cache,
- ldns_buffer* buf, uint32_t now, int addsoa, uint8_t* topname)
+ ldns_buffer* buf, time_t now, int addsoa, uint8_t* topname)
{
struct dns_msg* msg;
struct ub_packed_rrset_key* rrset;
* thus, qname DLV qclass does not exist.
*/
int val_neg_dlvlookup(struct val_neg_cache* neg, uint8_t* qname, size_t len,
- uint16_t qclass, struct rrset_cache* rrset_cache, uint32_t now);
+ uint16_t qclass, struct rrset_cache* rrset_cache, time_t now);
/**
* For the given query, try to get a reply out of the negative cache.
*/
struct dns_msg* val_neg_getmsg(struct val_neg_cache* neg,
struct query_info* qinfo, struct regional* region,
- struct rrset_cache* rrset_cache, ldns_buffer* buf, uint32_t now,
+ struct rrset_cache* rrset_cache, ldns_buffer* buf, time_t now,
int addsoa, uint8_t* topname);
enum sec_status
val_nsec_prove_nodata_dsreply(struct module_env* env, struct val_env* ve,
struct query_info* qinfo, struct reply_info* rep,
- struct key_entry_key* kkey, uint32_t* proof_ttl, char** reason)
+ struct key_entry_key* kkey, time_t* proof_ttl, char** reason)
{
struct ub_packed_rrset_key* nsec = reply_find_rrset_section_ns(
rep, qinfo->qname, qinfo->qname_len, LDNS_RR_TYPE_NSEC,
enum sec_status val_nsec_prove_nodata_dsreply(struct module_env* env,
struct val_env* ve, struct query_info* qinfo,
struct reply_info* rep, struct key_entry_key* kkey,
- uint32_t* proof_ttl, char** reason);
+ time_t* proof_ttl, char** reason);
/**
* nsec typemap check, takes an NSEC-type bitmap as argument, checks for type.
enum sec_status
dnskeyset_verify_rrset_sig(struct module_env* env, struct val_env* ve,
- uint32_t now, struct ub_packed_rrset_key* rrset,
+ time_t now, struct ub_packed_rrset_key* rrset,
struct ub_packed_rrset_key* dnskey, size_t sig_idx,
struct rbtree_t** sortree, char** reason)
{
*
* Use the smallest of these.
*/
- if(d->ttl > (uint32_t)origttl) {
+ if(d->ttl > (time_t)origttl) {
verbose(VERB_QUERY, "rrset TTL larger than original TTL,"
" adjusting TTL downwards");
d->ttl = origttl;
}
- if(expittl > 0 && d->ttl > (uint32_t)expittl) {
+ if(expittl > 0 && d->ttl > (time_t)expittl) {
verbose(VERB_ALGO, "rrset TTL larger than sig expiration ttl,"
" adjusting TTL downwards");
d->ttl = expittl;
enum sec_status
dnskey_verify_rrset_sig(struct regional* region, ldns_buffer* buf,
- struct val_env* ve, uint32_t now,
+ struct val_env* ve, time_t now,
struct ub_packed_rrset_key* rrset, struct ub_packed_rrset_key* dnskey,
size_t dnskey_idx, size_t sig_idx,
struct rbtree_t** sortree, int* buf_canon, char** reason)
* or unchecked on error.
*/
enum sec_status dnskeyset_verify_rrset_sig(struct module_env* env,
- struct val_env* ve, uint32_t now, struct ub_packed_rrset_key* rrset,
+ struct val_env* ve, time_t now, struct ub_packed_rrset_key* rrset,
struct ub_packed_rrset_key* dnskey, size_t sig_idx,
struct rbtree_t** sortree, char** reason);
* bogus if it did not validate.
*/
enum sec_status dnskey_verify_rrset_sig(struct regional* region,
- ldns_buffer* buf, struct val_env* ve, uint32_t now,
+ ldns_buffer* buf, struct val_env* ve, time_t now,
struct ub_packed_rrset_key* rrset, struct ub_packed_rrset_key* dnskey,
size_t dnskey_idx, size_t sig_idx,
struct rbtree_t** sortree, int* buf_canon, char** reason);
subtype == VAL_CLASS_NAMEERROR) {
/* NODATA means that the qname exists, but that there was
* no DS. This is a pretty normal case. */
- uint32_t proof_ttl = 0;
+ time_t proof_ttl = 0;
enum sec_status sec;
/* make sure there are NSECs or NSEC3s with signatures */