From: Wouter Wijngaards
Date: Tue, 7 Aug 2007 09:24:20 +0000 (+0000)
Subject: id number change
X-Git-Tag: release-0.5~145
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c3a5bcb8d40c08c73aea44043dc1ed8517da9744;p=thirdparty%2Funbound.git

id number change

git-svn-id: file:///svn/unbound/trunk@496 be551aaa-1e26-0410-a405-d3ace91eadb9
---

diff --git a/doc/Changelog b/doc/Changelog
index d8f394449..2a70cf592 100644
--- a/doc/Changelog
+++ b/doc/Changelog
@@ -1,5 +1,8 @@
 7 August 2007: Wouter
 	- security status type.
+	- security status is copied when rdata is equal for rrsets.
+	- rrset id is updated to invalidate all the message cache entries
+	  that refer to NSEC, NSEC3, DNAME rrsets that have changed.
 
 6 August 2007: Wouter
 	- key cache for validator.
diff --git a/services/cache/rrset.c b/services/cache/rrset.c
index a40f0f63d..1451227f2 100644
--- a/services/cache/rrset.c
+++ b/services/cache/rrset.c
@@ -45,6 +45,7 @@
 #include "util/data/packed_rrset.h"
 #include "util/data/msgreply.h"
 #include "util/region-allocator.h"
+#include "util/alloc.h"
 
 struct rrset_cache* rrset_cache_create(struct config_file* cfg, 
 	struct alloc_cache* alloc)
@@ -110,7 +111,7 @@ rrset_cache_touch(struct rrset_cache* r, struct ub_packed_rrset_key* key,
 
 /** see if rrset needs to be updated in the cache */
 static int
-need_to_update_rrset(void* nd, void* cd, uint32_t timenow)
+need_to_update_rrset(void* nd, void* cd, uint32_t timenow, int equal)
 {
 	struct packed_rrset_data* newd = (struct packed_rrset_data*)nd;
 	struct packed_rrset_data* cached = (struct packed_rrset_data*)cd;
@@ -121,22 +122,38 @@ need_to_update_rrset(void* nd, void* cd, uint32_t timenow)
 	if( cached->ttl < timenow )
 		return 1;
 	/* o same trust, but different in data - insert it */
-	if( newd->trust == cached->trust &&
-		!rrsetdata_equal(newd, cached))
+	if( newd->trust == cached->trust && !equal )
 		return 1;
 	/* o see if TTL is better than TTL in cache. */
 	/*   if so, see if rrset+rdata is the same */
 	/*   if so, update TTL in cache, even if trust is worse. */
-	if( newd->ttl > cached->ttl &&
-		rrsetdata_equal(newd, cached)) {
+	if( newd->ttl > cached->ttl && equal ) {
 		/* since all else is the same, use the best trust value */
-		if(newd->trust < cached->trust)
+		if(newd->trust < cached->trust) {
 			newd->trust = cached->trust;
+			newd->security = cached->security;
+		}
 		return 1;
 	}
 	return 0;
 }
 
+/** Update RRSet special key ID */
+static void
+rrset_update_id(struct rrset_ref* ref, struct alloc_cache* alloc)
+{
+	/* this may clear the cache and invalidate lock below */
+	uint64_t newid = alloc_get_id(alloc);
+	/* obtain writelock */
+	lock_rw_wrlock(&ref->key->entry.lock);
+	/* check if it was deleted in the meantime, if so, skip update */
+	if(ref->key->id == ref->id) {
+		ref->key->id = newid;
+		ref->id = newid;
+	}
+	lock_rw_unlock(&ref->key->entry.lock);
+}
+
 int 
 rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
 	struct alloc_cache* alloc, uint32_t timenow)
@@ -144,6 +161,8 @@ rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
 	struct lruhash_entry* e;
 	struct ub_packed_rrset_key* k = ref->key;
 	hashvalue_t h = k->entry.hash;
+	uint16_t rrset_type = ntohs(k->rk.type);
+	int equal = 0;
 	/* looks up item with a readlock - no editing! */
 	if((e=slabhash_lookup(&r->table, h, k, 0)) != 0) {
 		/* return id and key as they will be used in the cache
@@ -155,7 +174,10 @@ rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
 		 */
 		ref->key = (struct ub_packed_rrset_key*)e->key;
 		ref->id = ref->key->id;
-		if(!need_to_update_rrset(k->entry.data, e->data, timenow)) {
+		equal = rrsetdata_equal((struct packed_rrset_data*)k->entry.
+			data, (struct packed_rrset_data*)e->data);
+		if(!need_to_update_rrset(k->entry.data, e->data, timenow,
+			equal)) {
 			/* cache is superior, return that value */
 			lock_rw_unlock(&e->lock);
 			ub_packed_rrset_parsedelete(k, alloc);
@@ -171,8 +193,17 @@ rrset_cache_update(struct rrset_cache* r, struct rrset_ref* ref,
 		 * cache size values nicely. */
 	}
 	slabhash_insert(&r->table, h, &k->entry, k->entry.data, alloc);
-	if(e)
+	if(e) {
+		/* For NSEC, NSEC3, DNAME, when rdata is updated, update
+		 * the ID number so that proofs in message cache are
+		 * invalidated */
+		if((rrset_type == LDNS_RR_TYPE_NSEC
+			|| rrset_type == LDNS_RR_TYPE_NSEC3
+			|| rrset_type == LDNS_RR_TYPE_DNAME) && !equal) {
+			rrset_update_id(ref, alloc);
+		}
 		return 1;
+	}
 	return 0;
 }
 
diff --git a/util/alloc.c b/util/alloc.c
index a652706fb..fcdf68871 100644
--- a/util/alloc.c
+++ b/util/alloc.c
@@ -128,16 +128,21 @@ alloc_clear(struct alloc_cache* alloc)
 	alloc->num_quar = 0;
 }
 
-/** get a new id */
-static void
-alloc_get_id(struct alloc_cache* alloc, alloc_special_t* t)
+uint64_t
+alloc_get_id(struct alloc_cache* alloc)
 {
-	t->id = alloc->next_id++;
-	if(alloc->next_id == alloc->last_id) {
+	uint64_t id = alloc->next_id++;
+	if(id == alloc->last_id) {
 		/* TODO: clear the rrset cache */
 		log_warn("Out of ids. Clearing cache.");
+		/* start back at first number */ /* like in alloc_init*/
+		alloc->next_id = (uint64_t)alloc->thread_num;
+		alloc->next_id <<= THRNUM_SHIFT; /* in steps for comp. */
+		alloc->next_id += 1; /* portability. */
+		/* and generate new and safe id */
+		id = alloc->next_id++;
 	}
-	alloc_set_special_next(t, 0);
+	return id;
 }
 
 alloc_special_t*
@@ -150,7 +155,7 @@ alloc_special_obtain(struct alloc_cache* alloc)
 		p = alloc->quar;
 		alloc->quar = alloc_special_next(p);
 		alloc->num_quar--;
-		alloc_get_id(alloc, p);
+		p->id = alloc_get_id(alloc);
 		return p;
 	}
 	/* see if in global cache */
@@ -164,7 +169,7 @@ alloc_special_obtain(struct alloc_cache* alloc)
 		}
 		lock_quick_unlock(&alloc->super->lock);
 		if(p) {
-			alloc_get_id(alloc, p);
+			p->id = alloc_get_id(alloc);
 			return p;
 		}
 	}
@@ -173,7 +178,7 @@ alloc_special_obtain(struct alloc_cache* alloc)
 	if(!(p = (alloc_special_t*)malloc(sizeof(alloc_special_t))))
 		fatal_exit("alloc_special_obtain: out of memory");
 	alloc_setup_special(p);
-	alloc_get_id(alloc, p);
+	p->id = alloc_get_id(alloc);
 	return p;
 }
 
diff --git a/util/alloc.h b/util/alloc.h
index 761213e8b..1bf8e4e82 100644
--- a/util/alloc.h
+++ b/util/alloc.h
@@ -119,6 +119,14 @@ alloc_special_t* alloc_special_obtain(struct alloc_cache* alloc);
  */
 void alloc_special_release(struct alloc_cache* alloc, alloc_special_t* mem);
 
+/**
+ * Set ID number of special type to a fresh new ID number.
+ * In case of ID number overflow, the rrset cache has to be cleared.
+ * @param alloc: the alloc cache
+ * @return: fresh id is returned.
+ */
+uint64_t alloc_get_id(struct alloc_cache* alloc);
+
 /**
  * Get memory size of alloc cache, alloc structure including special types.
  * @param alloc: on what alloc.
diff --git a/validator/validator.c b/validator/validator.c
index c83e9e944..4d077133d 100644
--- a/validator/validator.c
+++ b/validator/validator.c
@@ -181,14 +181,12 @@ needs_validation(struct module_qstate* qstate, struct val_qstate* vq)
 		return 0;
 	}
 
-	/* TODO: check if already validated */
-	/*
-	 * if (response.getStatus() > SecurityStatus.BOGUS)
-	 * {
-	 *	log.debug("response has already been validated");
-	 *	return false;
-	 * }
-	 */
+	/* validate unchecked, and re-validate bogus messages */
+	if (vq->orig_msg->rep->security > sec_status_bogus)
+	{
+		verbose(VERB_ALGO, "response has already been validated");
+		return 0;
+	}
 
 	rcode = (int)FLAGS_GET_RCODE(vq->orig_msg->rep->flags);
 	if(rcode != LDNS_RCODE_NOERROR && rcode != LDNS_RCODE_NXDOMAIN) {
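
As background for the services/cache/rrset.c change: the message cache refers to rrsets by pointer plus the id it saw at the time (the rrset_ref used above), so bumping the id of an NSEC, NSEC3 or DNAME rrset whose rdata changed makes those remembered proofs fail their check instead of being silently reused. The following standalone sketch shows that id-comparison idea; it is not Unbound code, and all demo_* names are invented for illustration.

#include <stdio.h>
#include <stdint.h>

/* A cached rrset: the id changes whenever the rdata meaningfully changes. */
struct demo_rrset {
	uint64_t id;
	const char* rdata;	/* stand-in for the packed rdata */
};

/* A cached proof keeps the rrset pointer plus the id it saw at build time. */
struct demo_ref {
	struct demo_rrset* key;
	uint64_t id;
};

/* A cached message may only reuse the rrset if the id still matches. */
static int
demo_ref_valid(const struct demo_ref* ref)
{
	return ref->key->id == ref->id;
}

int
main(void)
{
	struct demo_rrset nsec = { 1000, "old NSEC rdata" };
	struct demo_ref proof = { &nsec, 1000 };	/* proof built from it */

	printf("proof valid: %d\n", demo_ref_valid(&proof));	/* prints 1 */

	/* rdata changed: store the new rdata and bump the id, as the
	 * NSEC/NSEC3/DNAME branch in rrset_cache_update() arranges above */
	nsec.rdata = "new NSEC rdata";
	nsec.id = 1001;

	printf("proof valid: %d\n", demo_ref_valid(&proof));	/* prints 0 */
	return 0;
}

Nothing has to walk the message cache to find the affected entries; they simply fail the id check the next time they are used.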
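
The alloc_get_id() rewrite in util/alloc.c hands out 64-bit ids from the calling thread's own range, starting at (thread_num << THRNUM_SHIFT) + 1, and restarts that range when the last id is reached. The sketch below is not Unbound code: it uses an artificially tiny shift so the wraparound shows up after a few iterations, and the demo_* names are invented.

#include <stdio.h>
#include <stdint.h>

/* Tiny shift so the wraparound is visible; Unbound's real shift is larger. */
#define DEMO_THRNUM_SHIFT 4

struct demo_alloc {
	int thread_num;		/* each thread owns a disjoint id range */
	uint64_t next_id;	/* next id to hand out */
	uint64_t last_id;	/* first value past this thread's range */
};

/* Mirror of the numbering in the patch: the first id is
 * ((uint64_t)thread_num << SHIFT) + 1, so id 0 is never used. */
static void
demo_init(struct demo_alloc* a, int thread_num)
{
	a->thread_num = thread_num;
	a->next_id = ((uint64_t)thread_num << DEMO_THRNUM_SHIFT) + 1;
	a->last_id = (uint64_t)(thread_num + 1) << DEMO_THRNUM_SHIFT;
}

/* Hand out a fresh id; on overflow restart at the start of the range.
 * The real alloc_get_id() logs a warning at this point, and clearing the
 * rrset cache before ids are reused is still marked TODO in this commit. */
static uint64_t
demo_get_id(struct demo_alloc* a)
{
	uint64_t id = a->next_id++;
	if(id == a->last_id) {
		printf("thread %d out of ids, restarting range\n",
			a->thread_num);
		a->next_id = ((uint64_t)a->thread_num << DEMO_THRNUM_SHIFT)
			+ 1;
		id = a->next_id++;
	}
	return id;
}

int
main(void)
{
	struct demo_alloc a;
	int i;
	demo_init(&a, 1);
	/* ask for more ids than the tiny range holds to show the wrap */
	for(i = 0; i < 18; i++)
		printf("id %llu\n", (unsigned long long)demo_get_id(&a));
	return 0;
}

Per-thread ranges presumably let each thread generate ids without taking a lock; the cost is that a range can run out, at which point old ids would be reused, which is why the code warns and, per the TODO, the rrset cache is meant to be cleared first.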