if(!x) {
/* not found, create the zone */
x = auth_xfer_create(az, z);
- lock_basic_lock(&x->lock);
} else {
lock_basic_lock(&x->lock);
}
/* 0 rrsigs to move over, done */
return 1;
}
- log_info("moveover %d sigs size %d", (int)sigs, (int)sigsz);
/* allocate rrset sigsz larger for extra sigs elements, and
* allocate rrsig sigsz smaller for less sigs elements. */
case LDNS_RDF_TYPE_DNAME:
sldns_buffer_set_position(&pktbuf,
(size_t)(rd -
- sldns_buffer_current(&pktbuf)));
+ sldns_buffer_begin(&pktbuf)));
oldpos = sldns_buffer_position(&pktbuf);
/* moves pktbuf to right after the
* compressed dname, and returns uncompressed
if(!sldns_buffer_available(buf, rdlen)) return 0;
sldns_buffer_write(buf, rd, rdlen);
}
- sldns_buffer_flip(buf);
-
/* fixup rdlength */
sldns_buffer_write_u16_at(buf, rdlenpos,
sldns_buffer_position(buf)-rdlenpos-2);
+ sldns_buffer_flip(buf);
return 1;
}
}
rr = sldns_buffer_begin(scratch_buffer);
rr_len = sldns_buffer_limit(scratch_buffer);
+ char buf[512];
+ (void)sldns_wire2str_rr_buf(rr, rr_len, buf, sizeof(buf));
+ log_info("decompress is %s", buf);
dname_len = dname_valid(rr, rr_len);
return az_insert_rr(z, rr, rr_len, dname_len, duplicate);
}
char wcname[256];
sldns_wire2str_dname_buf(wildcard->name, wildcard->namelen,
wcname, sizeof(wcname));
- log_info("wildcard %s", wcname);
}
if((rrset=az_domain_rrset(wildcard, qinfo->qtype)) != NULL) {
/* wildcard has type, add it */
lock_rw_unlock(&az->lock);
return 0;
}
+ lock_rw_rdlock(&z->lock);
lock_rw_unlock(&az->lock);
if(!z->for_downstream) {
lock_rw_unlock(&z->lock);
struct query_info qinfo;
uint32_t serial;
int have_zone;
- lock_basic_lock(&xfr->lock);
have_zone = xfr->have_zone;
serial = xfr->serial;
- lock_basic_unlock(&xfr->lock);
memset(&qinfo, 0, sizeof(qinfo));
qinfo.qname = xfr->name;
/* already at end of chunks? */
if(!*rr_chunk)
return;
+ /* move within this chunk */
+ if((*rr_chunk)->len >= LDNS_HEADER_SIZE &&
+ (*rr_num)+1 < (int)LDNS_ANCOUNT((*rr_chunk)->data)) {
+ (*rr_num) += 1;
+ *rr_pos = rr_nextpos;
+ return;
+ }
+ /* no more RRs in this chunk */
+ /* continue with next chunk, see if it has RRs */
+ if(*rr_chunk)
+ *rr_chunk = (*rr_chunk)->next;
while(*rr_chunk) {
- /* move within this chunk */
+ *rr_num = 0;
+ *rr_pos = 0;
if((*rr_chunk)->len >= LDNS_HEADER_SIZE &&
- (*rr_num)+1 < (int)LDNS_ANCOUNT((*rr_chunk)->data)) {
- (*rr_num) += 1;
- *rr_pos = rr_nextpos;
- return ;
+ LDNS_ANCOUNT((*rr_chunk)->data) > 0) {
+ return;
}
-
- /* no more RRs in this chunk */
- /* continue with next chunk, see if it has RRs */
*rr_chunk = (*rr_chunk)->next;
- *rr_num = 0;
- *rr_pos = 0;
}
}
/* fetch rr information */
sldns_buffer_init_frm_data(&pkt, rr_chunk->data, rr_chunk->len);
- sldns_buffer_set_position(&pkt, rr_pos);
+ if(rr_pos == 0) {
+ size_t i;
+ /* skip question section */
+ sldns_buffer_set_position(&pkt, LDNS_HEADER_SIZE);
+ for(i=0; i<LDNS_QDCOUNT(rr_chunk->data); i++) {
+ if(pkt_dname_len(&pkt) ==0) return 0;
+ if(sldns_buffer_remaining(&pkt) < 4) return 0;
+ sldns_buffer_skip(&pkt, 4); /* type and class */
+ }
+ } else {
+ sldns_buffer_set_position(&pkt, rr_pos);
+ }
*rr_dname = sldns_buffer_current(&pkt);
if(pkt_dname_len(&pkt) == 0) return 0;
if(sldns_buffer_remaining(&pkt) < 10) return 0;
return 1;
}
+/** print log message where we are in parsing the zone transfer.
+ * Logs (at VERB_ALGO) the owner name and type of the RR currently being
+ * processed, prefixed with the caller-supplied label and the RR counter.
+ * Returns silently if the owner dname is malformed or too large to print. */
+static void
+log_rrlist_position(const char* label, struct auth_chunk* rr_chunk,
+	uint8_t* rr_dname, uint16_t rr_type, size_t rr_counter)
+{
+	sldns_buffer pkt;
+	size_t dlen;
+	uint8_t buf[256]; /* uncompressed owner dname, wire format */
+	char str[256]; /* owner dname as presentation text */
+	char typestr[32]; /* RR type as presentation text */
+	/* wrap the chunk in a buffer and seek to the RR's owner name;
+	 * rr_dname points into rr_chunk->data, so the offset is valid */
+	sldns_buffer_init_frm_data(&pkt, rr_chunk->data, rr_chunk->len);
+	sldns_buffer_set_position(&pkt, (size_t)(rr_dname -
+		sldns_buffer_begin(&pkt)));
+	/* skip logging on a malformed dname or one that would overflow buf */
+	if((dlen=pkt_dname_len(&pkt)) == 0) return;
+	if(dlen >= sizeof(buf)) return;
+	/* decompress the (possibly compressed) dname before printing */
+	dname_pkt_copy(&pkt, buf, rr_dname);
+	dname_str(buf, str);
+	(void)sldns_wire2str_type_buf(rr_type, typestr, sizeof(typestr));
+	verbose(VERB_ALGO, "%s at[%d] %s %s", label, (int)rr_counter,
+		str, typestr);
+}
+
/** apply IXFR to zone in memory. z is locked. false on failure(mallocfail) */
static int
apply_ixfr(struct auth_xfer* xfr, struct auth_zone* z,
/* failed to parse RR */
return 0;
}
+ if(verbosity>=VERB_ALGO) log_rrlist_position("apply_ixfr",
+ rr_chunk, rr_dname, rr_type, rr_counter);
/* twiddle add/del mode and check for start and end */
if(rr_counter == 0 && rr_type != LDNS_RR_TYPE_SOA)
return 0;
uint32_t serial = 0;
size_t rr_nextpos;
size_t rr_counter = 0;
+ int have_end_soa = 0;
/* clear the data tree */
traverse_postorder(&z->data, auth_data_del, NULL);
/* failed to parse RR */
return 0;
}
+ if(verbosity>=VERB_ALGO) log_rrlist_position("apply_axfr",
+ rr_chunk, rr_dname, rr_type, rr_counter);
if(rr_type == LDNS_RR_TYPE_SOA) {
if(rr_counter != 0) {
/* end of the axfr */
+ have_end_soa = 1;
break;
}
if(rr_rdlen < 22) return 0; /* bad SOA rdlen */
rr_counter++;
chunk_rrlist_gonext(&rr_chunk, &rr_num, &rr_pos, rr_nextpos);
}
+ if(!have_end_soa) {
+ log_err("no end SOA record for AXFR");
+ return 0;
+ }
xfr->serial = serial;
xfr->have_zone = 1;
* and we may then get an instant cache response,
* and that calls the callback just like a full
* lookup and lookup failures also call callback */
+ lock_basic_unlock(&xfr->lock);
return;
}
xfr_transfer_move_to_next_lookup(xfr, env);
xfr->task_transfer->master = xfr_transfer_current_master(xfr);
if(xfr_transfer_init_fetch(xfr, env)) {
/* successfully started, wait for callback */
+ lock_basic_unlock(&xfr->lock);
return;
}
/* failed to fetch, next master */
xfr_transfer_nextmaster(xfr);
}
- lock_basic_lock(&xfr->lock);
/* we failed to fetch the zone, move to wait task
* use the shorter retry timeout */
xfr_transfer_disown(xfr);
struct module_env* env;
log_assert(xfr->task_transfer);
env = xfr->task_transfer->env;
+ lock_basic_lock(&xfr->lock);
/* process result */
if(rcode == LDNS_RCODE_NOERROR) {
/* it worked! */
auth_chunks_delete(xfr->task_transfer);
- lock_basic_lock(&xfr->lock);
/* we fetched the zone, move to wait task */
xfr_transfer_disown(xfr);
log_assert(xfr->task_transfer->chunks_last == NULL);
xfr->task_transfer->worker = env->worker;
xfr->task_transfer->env = env;
- lock_basic_unlock(&xfr->lock);
/* init transfer process */
/* find that master in the transfer's list of masters? */
xfr_probe_disown(xfr);
xfr_start_transfer(xfr, env,
xfr_probe_current_master(xfr));
- lock_basic_unlock(&xfr->lock);
return 0;
}
sizeof(xfr->task_probe->worker));
lock_protect(&xfr->lock, &xfr->task_transfer->worker,
sizeof(xfr->task_transfer->worker));
+ lock_basic_lock(&xfr->lock);
return xfr;
}
struct fake_pending* p = runtime->pending_list;
/* slow, O(N*N), but it works as advertised with weird matching */
while(p) {
+ if(p->tcp_pkt_counter != 0) {
+ /* continue tcp transfer */
+ *pend = p;
+ return 1;
+ }
if(pending_find_match(runtime, entry, p)) {
*pend = p;
return 1;
}
}
+/** number of replies in entry.
+ * Walks the entry's reply_list and returns how many reply packets are
+ * queued, so callers can tell when a multi-packet (TCP) answer is done. */
+static int
+count_reply_packets(struct entry* entry)
+{
+	int count = 0;
+	struct reply_packet* reppkt = entry->reply_list;
+	while(reppkt) {
+		count++;
+		reppkt = reppkt->next;
+	}
+	return count;
+}
+
/**
* Fill buffer with reply from the entry.
*/
static void
fill_buffer_with_reply(sldns_buffer* buffer, struct entry* entry, uint8_t* q,
- size_t qlen)
+ size_t qlen, int tcp_pkt_counter)
{
+ struct reply_packet* reppkt;
uint8_t* c;
size_t clen;
log_assert(entry && entry->reply_list);
sldns_buffer_clear(buffer);
- if(entry->reply_list->reply_from_hex) {
- c = sldns_buffer_begin(entry->reply_list->reply_from_hex);
- clen = sldns_buffer_limit(entry->reply_list->reply_from_hex);
+ reppkt = entry->reply_list;
+ if(tcp_pkt_counter > 0) {
+ int i = tcp_pkt_counter;
+ while(reppkt && i--)
+ reppkt = reppkt->next;
+ log_pkt("extra_packet ", reppkt->reply_pkt, reppkt->reply_len);
+ }
+ if(reppkt->reply_from_hex) {
+ c = sldns_buffer_begin(reppkt->reply_from_hex);
+ clen = sldns_buffer_limit(reppkt->reply_from_hex);
if(!c) fatal_exit("out of memory");
} else {
- c = entry->reply_list->reply_pkt;
- clen = entry->reply_list->reply_len;
+ c = reppkt->reply_pkt;
+ clen = reppkt->reply_len;
}
if(c) {
if(q) adjust_packet(entry, &c, &clen, q, qlen);
c.type = comm_udp;
if(pend->transport == transport_tcp)
c.type = comm_tcp;
- fill_buffer_with_reply(c.buffer, entry, pend->pkt, pend->pkt_len);
+ fill_buffer_with_reply(c.buffer, entry, pend->pkt, pend->pkt_len,
+ pend->tcp_pkt_counter);
repinfo.c = &c;
repinfo.addrlen = pend->addrlen;
memcpy(&repinfo.addr, &pend->addr, pend->addrlen);
- if(!pend->serviced)
- pending_list_delete(runtime, pend);
+ if(!pend->serviced) {
+ if(entry->reply_list->next &&
+ pend->tcp_pkt_counter < count_reply_packets(entry)) {
+ /* go to next packet next time */
+ pend->tcp_pkt_counter++;
+ } else {
+ pending_list_delete(runtime, pend);
+ }
+ }
if((*cb)(&c, cb_arg, NETEVENT_NOERROR, &repinfo)) {
fatal_exit("testbound: unexpected: callback returned 1");
}
if(todo->match->match_transport == transport_tcp)
repinfo.c->type = comm_tcp;
else repinfo.c->type = comm_udp;
- fill_buffer_with_reply(repinfo.c->buffer, todo->match, NULL, 0);
+ fill_buffer_with_reply(repinfo.c->buffer, todo->match, NULL, 0, 0);
log_info("testbound: incoming QUERY");
log_pkt("query pkt", todo->match->reply_list->reply_pkt,
todo->match->reply_list->reply_len);
c.type = comm_tcp;
if(todo->evt_type == repevt_back_reply && todo->match) {
fill_buffer_with_reply(c.buffer, todo->match, p->pkt,
- p->pkt_len);
+ p->pkt_len, p->tcp_pkt_counter);
}
repinfo.c = &c;
repinfo.addrlen = p->addrlen;
memcpy(&repinfo.addr, &p->addr, p->addrlen);
- if(!p->serviced)
- pending_list_delete(runtime, p);
+ if(!p->serviced) {
+ if(todo->match->reply_list->next && !error &&
+ p->tcp_pkt_counter < count_reply_packets(todo->match)) {
+ /* go to next packet next time */
+ p->tcp_pkt_counter++;
+ } else {
+ pending_list_delete(runtime, p);
+ }
+ }
if((*cb)(&c, cb_arg, error, &repinfo)) {
fatal_exit("unexpected: pending callback returned 1");
}