/*
 * DEBUG: section 72    Peer Digest Routines
 * AUTHOR: Alex Rousskov
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 *  Squid is the result of efforts by numerous individuals from
 *  the Internet community; see the CONTRIBUTORS file for full
 *  details.  Many organizations have provided support for Squid's
 *  development; see the SPONSORS file for full details.  Squid is
 *  Copyrighted (C) 2001 by the Regents of the University of
 *  California; see the COPYRIGHT file for full details.  Squid
 *  incorporates software developed and/or copyrighted by other
 *  sources; see the CREDITS file for full details.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#if USE_CACHE_DIGESTS
#include "CacheDigest.h"
#include "CachePeer.h"
#include "event.h"
#include "forward.h"
#include "globals.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "internal.h"
#include "MemObject.h"
#include "neighbors.h"
#include "mime_header.h"
#include "PeerDigest.h"
#include "SquidTime.h"
#include "Store.h"
#include "store_key_md5.h"
#include "StoreClient.h"
#include "tools.h"

/* local types */

/* local prototypes */
static time_t peerDigestIncDelay(const PeerDigest * pd);
static time_t peerDigestNewDelay(const StoreEntry * e);
static void peerDigestSetCheck(PeerDigest * pd, time_t delay);
static void peerDigestClean(PeerDigest *);
static EVH peerDigestCheck;
static void peerDigestRequest(PeerDigest * pd);
static STCB peerDigestHandleReply;
static int peerDigestFetchReply(void *, char *, ssize_t);
int peerDigestSwapInHeaders(void *, char *, ssize_t);
int peerDigestSwapInCBlock(void *, char *, ssize_t);
int peerDigestSwapInMask(void *, char *, ssize_t);
static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err);
static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err);
static void peerDigestFetchFinish(DigestFetchState * fetch, int err);
static void peerDigestFetchSetStats(DigestFetchState * fetch);
static int peerDigestSetCBlock(PeerDigest * pd, const char *buf);
static int peerDigestUseful(const PeerDigest * pd);

/* local constants */
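/* the digest version we generate (current) and the oldest peer version we accept (required);
 * both are checked against the received control block in peerDigestSetCBlock() */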
Version const CacheDigestVer = { 5, 3 };

#define StoreDigestCBlockSize sizeof(StoreDigestCBlock)

/* min interval for requesting digests from a given peer */
static const time_t PeerDigestReqMinGap = 5 * 60;       /* seconds */
/* min interval for requesting digests (cumulative request stream) */
static const time_t GlobDigestReqMinGap = 1 * 60;       /* seconds */

/* local vars */

static time_t pd_last_req_time = 0;     /* last call to Check */

/* initialize peer digest */
static void
peerDigestInit(PeerDigest * pd, CachePeer * p)
{
    assert(pd && p);

    memset(pd, 0, sizeof(*pd));
    /*
     * DPW 2007-04-12
     * Lock on to the peer here.  The corresponding cbdataReferenceDone()
     * is in peerDigestDestroy().
     */
    pd->peer = cbdataReference(p);
    /* if the peer disappears, we will still know its name */
    pd->host = p->host;

    pd->times.initialized = squid_curtime;
}

static void
peerDigestClean(PeerDigest * pd)
{
    assert(pd);

    if (pd->cd)
        cacheDigestDestroy(pd->cd);

    pd->host.clean();
}

CBDATA_CLASS_INIT(PeerDigest);

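/* PeerDigest objects are allocated through cbdata so that outstanding
 * cbdataReference() holders can detect when the digest goes away */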
void *
PeerDigest::operator new (size_t)
{
    CBDATA_INIT_TYPE(PeerDigest);
    PeerDigest *result = cbdataAlloc(PeerDigest);
    return result;
}

void
PeerDigest::operator delete (void *address)
{
    PeerDigest *t = static_cast<PeerDigest *>(address);
    cbdataFree(t);
}

/* allocate new peer digest, call Init, and lock everything */
PeerDigest *
peerDigestCreate(CachePeer * p)
{
    PeerDigest *pd;
    assert(p);

    pd = new PeerDigest;
    peerDigestInit(pd, p);

    /* XXX This does not look right, and the same thing again in the caller */
    return cbdataReference(pd);
}

/* call Clean and free/unlock everything */
static void
peerDigestDestroy(PeerDigest * pd)
{
    void *p;
    assert(pd);
    void * peerTmp = pd->peer;

    /*
     * DPW 2007-04-12
     * We locked the peer in peerDigestInit(), this is
     * where we unlock it.  If the peer is still valid,
     * tell it that the digest is gone.
     */
    if (cbdataReferenceValidDone(peerTmp, &p))
        peerNoteDigestGone((CachePeer *)p);

    peerDigestClean(pd);

    delete pd;
}

/* called by peer to indicate that somebody actually needs this digest */
void
peerDigestNeeded(PeerDigest * pd)
{
    assert(pd);
    assert(!pd->flags.needed);
    assert(!pd->cd);

    pd->flags.needed = true;
    pd->times.needed = squid_curtime;
    peerDigestSetCheck(pd, 0);  /* check asap */
}

/* currently we do not have a reason to disable without destroying */
#if FUTURE_CODE
/* disables peer for good */
static void
peerDigestDisable(PeerDigest * pd)
{
    debugs(72, 2, "peerDigestDisable: peer " << pd->host.buf() << " disabled for good");
    pd->times.disabled = squid_curtime;
    pd->times.next_check = -1;  /* never */
    pd->flags.usable = 0;

    if (pd->cd) {
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    /* we do not destroy the pd itself to preserve its "history" and stats */
}

#endif

/* increment retry delay [after an unsuccessful attempt] */
static time_t
peerDigestIncDelay(const PeerDigest * pd)
{
    assert(pd);
    return pd->times.retry_delay > 0 ?
           2 * pd->times.retry_delay :  /* exponential backoff */
           PeerDigestReqMinGap;         /* minimal delay */
}

/* artificially increases Expires: setting to avoid race conditions
 * returns the delay till that [increased] expiration time */
static time_t
peerDigestNewDelay(const StoreEntry * e)
{
    assert(e);

    if (e->expires > 0)
        return e->expires + PeerDigestReqMinGap - squid_curtime;

    return PeerDigestReqMinGap;
}

/* registers next digest verification */
static void
peerDigestSetCheck(PeerDigest * pd, time_t delay)
{
    eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
    pd->times.next_check = squid_curtime + delay;
    debugs(72, 3, "peerDigestSetCheck: will check peer " << pd->host << " in " << delay << " secs");
}

/*
 * called when the peer is about to disappear or has already disappeared
 */
void
peerDigestNotePeerGone(PeerDigest * pd)
{
    if (pd->flags.requested) {
        debugs(72, 2, "peerDigest: peer " << pd->host << " gone, will destroy after fetch.");
        /* do nothing now, the fetching chain will notice and take action */
    } else {
        debugs(72, 2, "peerDigest: peer " << pd->host << " is gone, destroying now.");
        peerDigestDestroy(pd);
    }
}

/* callback for eventAdd() (with peer digest locked)
 * request new digest if our copy is too old or if we lack one;
 * schedule next check otherwise */
static void
peerDigestCheck(void *data)
{
    PeerDigest *pd = (PeerDigest *)data;
    time_t req_time;

    assert(!pd->flags.requested);

    pd->times.next_check = 0;   /* unknown */

    if (!cbdataReferenceValid(pd->peer)) {
        peerDigestNotePeerGone(pd);
        return;
    }

    debugs(72, 3, "peerDigestCheck: peer " << pd->peer->host << ":" << pd->peer->http_port);
    debugs(72, 3, "peerDigestCheck: time: " << squid_curtime <<
           ", last received: " << (long int) pd->times.received << " (" <<
           std::showpos << (int) (squid_curtime - pd->times.received) << ")");

    /* decide when we should send the request:
     * request now unless too close to other requests */
    req_time = squid_curtime;

    /* per-peer limit */

    if (req_time - pd->times.received < PeerDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close peer requests (" <<
               (int) (req_time - pd->times.received) << " < " <<
               (int) PeerDigestReqMinGap << " secs).");

        req_time = pd->times.received + PeerDigestReqMinGap;
    }

    /* global limit */
    if (req_time - pd_last_req_time < GlobDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close requests (" <<
               (int) (req_time - pd_last_req_time) << " < " <<
               (int) GlobDigestReqMinGap << " secs).");

        req_time = pd_last_req_time + GlobDigestReqMinGap;
    }

    if (req_time <= squid_curtime)
        peerDigestRequest(pd);  /* will set pd->flags.requested */
    else
        peerDigestSetCheck(pd, req_time - squid_curtime);
}

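/* the fetch state is cbdata-managed so that store callbacks can detect
 * (via cbdataReferenceValid) when the fetch has been torn down */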
CBDATA_TYPE(DigestFetchState);

/* ask store for a digest */
static void
peerDigestRequest(PeerDigest * pd)
{
    CachePeer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url;
    const cache_key *key;
    HttpRequest *req;
    DigestFetchState *fetch = NULL;
    StoreIOBuffer tempBuffer;

    pd->req_result = NULL;
    pd->flags.requested = true;

    /* compute future request components */

    if (p->digest_url)
        url = xstrdup(p->digest_url);
    else
        url = internalRemoteUri(p->host, p->http_port,
                                "/squid-internal-periodic/", StoreDigestFileName);

    req = HttpRequest::CreateFromUrl(url);

    assert(req);

    key = storeKeyPublicByRequest(req);

    debugs(72, 2, "peerDigestRequest: " << url << " key: " << storeKeyText(key));

    /* add custom headers */
    assert(!req->header.len);

    req->header.putStr(HDR_ACCEPT, StoreDigestMimeStr);

    req->header.putStr(HDR_ACCEPT, "text/html");

    if (p->login)
        xstrncpy(req->login, p->login, MAX_LOGIN_SZ);

    /* create fetch state structure */
    CBDATA_INIT_TYPE(DigestFetchState);

    fetch = cbdataAlloc(DigestFetchState);

    fetch->request = HTTPMSGLOCK(req);

    fetch->pd = cbdataReference(pd);

    fetch->offset = 0;

    fetch->state = DIGEST_READ_REPLY;

    /* update timestamps */
    fetch->start_time = squid_curtime;

    pd->times.requested = squid_curtime;

    pd_last_req_time = squid_curtime;

    req->flags.cachable = true;

    /* the rest is based on clientProcessExpired() */
    req->flags.refresh = true;

    old_e = fetch->old_entry = Store::Root().get(key);

    if (old_e) {
        debugs(72, 5, "peerDigestRequest: found old entry");

        old_e->lock();
        old_e->createMemObject(url, url);

        fetch->old_sc = storeClientListAdd(old_e, fetch);
    }

    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    fetch->sc = storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */

    if (old_e)
        e->lastmod = old_e->lastmod;

    /* push towards peer cache */
    debugs(72, 3, "peerDigestRequest: forwarding to fwdStart...");

    FwdState::fwdStart(Comm::ConnectionPointer(), e, req);

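    /* start reading the reply into fetch->buf; peerDigestHandleReply() drives the rest */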
    tempBuffer.offset = 0;

    tempBuffer.length = SM_PAGE_SIZE;

    tempBuffer.data = fetch->buf;

    storeClientCopy(fetch->sc, e, tempBuffer,
                    peerDigestHandleReply, fetch);
}

/* Handle the data copying .. */

/*
 * This routine handles the copy data and then redirects the
 * copy to a bunch of subfunctions depending upon the copy state.
 * It also tracks the buffer offset and "seen", since I'm actually
 * not interested in rewriting everything to suit my little idea.
 */
static void
peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    int retsize = -1;
    digest_read_state_t prevstate;
    int newsize;

    assert(fetch->pd && receivedData.data);
    /* The existing code assumes that the received pointer is
     * where we asked the data to be put
     */
    assert(fetch->buf + fetch->bufofs == receivedData.data);

    /* Update the buffer size */
    fetch->bufofs += receivedData.length;

    assert(fetch->bufofs <= SM_PAGE_SIZE);

    /* If we've fetched enough, return */

    if (peerDigestFetchedEnough(fetch, fetch->buf, fetch->bufofs, "peerDigestHandleReply"))
        return;

    /* Call the right function based on the state */
    /* (Those functions will update the state if needed) */

    /* Give us a temporary reference. Some of the calls we make may
     * try to destroy the fetch structure, and we like to know if they
     * do
     */
    fetch = cbdataReference(fetch);

    /* Repeat this loop until we're out of data OR the state changes */
    /* (So keep going if the state has changed and we still have data) */
    do {
        prevstate = fetch->state;

        switch (fetch->state) {

        case DIGEST_READ_REPLY:
            retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_HEADERS:
            retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_CBLOCK:
            retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_MASK:
            retsize = peerDigestSwapInMask(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_NONE:
            break;

        case DIGEST_READ_DONE:
            goto finish;
            break;

        default:
            fatal("Bad digest transfer mode!\n");
        }

        if (retsize < 0)
            goto finish;

        /*
         * The returned size indicates how much of the buffer was read -
         * so move the remainder of the buffer to the beginning
         * and update the bufofs / bufsize
         */
        newsize = fetch->bufofs - retsize;

        memmove(fetch->buf, fetch->buf + retsize, fetch->bufofs - newsize);

        fetch->bufofs = newsize;

    } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);

    /* Update the copy offset */
    fetch->offset += receivedData.length;

    /* Schedule another copy */
    if (cbdataReferenceValid(fetch)) {
        StoreIOBuffer tempBuffer;
        tempBuffer.offset = fetch->offset;
        tempBuffer.length = SM_PAGE_SIZE - fetch->bufofs;
        tempBuffer.data = fetch->buf + fetch->bufofs;
        storeClientCopy(fetch->sc, fetch->entry, tempBuffer,
                        peerDigestHandleReply, fetch);
    }

finish:
    /* Get rid of our reference, we've finished with it for now */
    cbdataReferenceDone(fetch);
}

/* wait for full http headers to be received then parse them */
/*
 * This routine handles parsing the reply line.
 * If the reply line indicates an OK, the same data is thrown
 * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
 * we simply stop parsing.
 */
static int
peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd = fetch->pd;
    size_t hdr_size;
    assert(pd && buf);
    assert(!fetch->offset);

    assert(fetch->state == DIGEST_READ_REPLY);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
        return -1;

    if ((hdr_size = headersEnd(buf, size))) {
        http_status status;
        HttpReply const *reply = fetch->entry->getReply();
        assert(reply);
        assert(reply->sline.status != 0);
        status = reply->sline.status;
        debugs(72, 3, "peerDigestFetchReply: " << pd->host << " status: " << status <<
               ", expires: " << (long int) reply->expires << " (" << std::showpos <<
               (int) (reply->expires - squid_curtime) << ")");

        /* this "if" is based on clientHandleIMSReply() */

        if (status == HTTP_NOT_MODIFIED) {
            /* our old entry is fine */
            assert(fetch->old_entry);

            if (!fetch->old_entry->mem_obj->request)
                fetch->old_entry->mem_obj->request = HTTPMSGLOCK(fetch->entry->mem_obj->request);

            assert(fetch->old_entry->mem_obj->request);

            HttpReply *old_rep = (HttpReply *) fetch->old_entry->getReply();

            old_rep->updateOnNotModified(reply);

            fetch->old_entry->timestampsSet();

            /* get rid of 304 reply */
            storeUnregister(fetch->sc, fetch->entry, fetch);

            fetch->entry->unlock();

            fetch->entry = fetch->old_entry;

            fetch->old_entry = NULL;

            /* preserve request -- we need its size to update counters */
            /* requestUnlink(r); */
            /* fetch->entry->mem_obj->request = NULL; */
        } else if (status == HTTP_OK) {
            /* get rid of old entry if any */

            if (fetch->old_entry) {
                debugs(72, 3, "peerDigestFetchReply: got new digest, releasing old one");
                storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
                fetch->old_entry->releaseRequest();
                fetch->old_entry->unlock();
                fetch->old_entry = NULL;
            }
        } else {
            /* some kind of a bug */
            peerDigestFetchAbort(fetch, buf, httpStatusLineReason(&reply->sline));
            return -1;      /* XXX -1 will abort stuff in ReadReply! */
        }

        /* must have a ready-to-use store entry if we got here */
        /* can we stay with the old in-memory digest? */
        if (status == HTTP_NOT_MODIFIED && fetch->pd->cd) {
            peerDigestFetchStop(fetch, buf, "Not modified");
            fetch->state = DIGEST_READ_DONE;
        } else {
            fetch->state = DIGEST_READ_HEADERS;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "reply header too big");
    }

    /* We don't want to actually ack that we've handled anything,
     * otherwise SwapInHeaders() won't get the reply line .. */
    return 0;
}

/* fetch headers from disk, pass on to SwapInCBlock */
int
peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    size_t hdr_size;

    assert(fetch->state == DIGEST_READ_HEADERS);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
        return -1;

    assert(!fetch->offset);

    if ((hdr_size = headersEnd(buf, size))) {
        assert(fetch->entry->getReply());
        assert(fetch->entry->getReply()->sline.status != 0);

        if (fetch->entry->getReply()->sline.status != HTTP_OK) {
            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
                   " status " << fetch->entry->getReply()->sline.status <<
                   " got cached!");

            peerDigestFetchAbort(fetch, buf, "internal status error");
            return -1;
        }

        fetch->state = DIGEST_READ_CBLOCK;
        return hdr_size;    /* Say how much data we read */
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE) {
            peerDigestFetchAbort(fetch, buf, "stored header too big");
            return -1;
        } else {
            return 0;       /* We need to read more to parse .. */
        }
    }

    fatal("peerDigestSwapInHeaders() - shouldn't get here!\n");
    return 0; /* keep gcc happy */
}

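/* parse the digest control block once a complete cblock has been buffered;
 * the digest bit mask follows it in the reply body */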
int
peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;

    assert(fetch->state == DIGEST_READ_CBLOCK);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock"))
        return -1;

    if (size >= (ssize_t)StoreDigestCBlockSize) {
        PeerDigest *pd = fetch->pd;

        assert(pd && fetch->entry->getReply());

        if (peerDigestSetCBlock(pd, buf)) {
            /* XXX: soon we will have variable header size */
            /* switch to CD buffer and fetch digest guts */
            buf = NULL;
            assert(pd->cd->mask);
            fetch->state = DIGEST_READ_MASK;
            return StoreDigestCBlockSize;
        } else {
            peerDigestFetchAbort(fetch, buf, "invalid digest cblock");
            return -1;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE) {
            peerDigestFetchAbort(fetch, buf, "digest cblock too big");
            return -1;
        } else {
            return 0;       /* We need more data */
        }
    }

    fatal("peerDigestSwapInCBlock(): shouldn't get here!\n");
    return 0; /* keep gcc happy */
}

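/* copy received mask bytes into the in-memory digest until mask_size bytes have arrived */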
int
peerDigestSwapInMask(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd;

    pd = fetch->pd;
    assert(pd->cd && pd->cd->mask);

    /*
     * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore!
     * we need to do the copy ourselves!
     */
    memcpy(pd->cd->mask + fetch->mask_offset, buf, size);

    /* NOTE! buf points to the middle of pd->cd->mask! */

    if (peerDigestFetchedEnough(fetch, NULL, size, "peerDigestSwapInMask"))
        return -1;

    fetch->mask_offset += size;

    if (fetch->mask_offset >= pd->cd->mask_size) {
        debugs(72, 2, "peerDigestSwapInMask: Done! Got " <<
               fetch->mask_offset << ", expected " << pd->cd->mask_size);
        assert(fetch->mask_offset == pd->cd->mask_size);
        assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask"));
        return -1;      /* XXX! */
    } else {
        /* We always read everything, so return so */
        return size;
    }

    fatal("peerDigestSwapInMask(): shouldn't get here!\n");
    return 0; /* keep gcc happy */
}

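/* check the usual reasons for ending a fetch (peer gone, swap failure, eof, useless digest);
 * if one applies, finish the request and return non-zero */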
static int
peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name)
{
    PeerDigest *pd = NULL;
    const char *host = "<unknown>";     /* peer host */
    const char *reason = NULL;  /* reason for completion */
    const char *no_bug = NULL;  /* successful completion if set */
    const int pdcb_valid = cbdataReferenceValid(fetch->pd);
    const int pcb_valid = cbdataReferenceValid(fetch->pd->peer);

    /* test possible exit conditions (the same for most steps!)
     * cases marked with '?!' should not happen */

    if (!reason) {
        if (!(pd = fetch->pd))
            reason = "peer digest disappeared?!";

#if DONT /* WHY NOT? /HNO */

        else if (!cbdataReferenceValid(pd))
            reason = "invalidated peer digest?!";

#endif

        else
            host = pd->host.termedBuf();
    }

    debugs(72, 6, step_name << ": peer " << host << ", offset: " <<
           fetch->offset << " size: " << size << ".");

    /* continue checking (with pd and host known and valid) */

    if (!reason) {
        if (!cbdataReferenceValid(pd->peer))
            reason = "peer disappeared";
        else if (size < 0)
            reason = "swap failure";
        else if (!fetch->entry)
            reason = "swap aborted?!";
        else if (EBIT_TEST(fetch->entry->flags, ENTRY_ABORTED))
            reason = "swap aborted";
    }

    /* continue checking (maybe-successful eof case) */
    if (!reason && !size) {
        if (!pd->cd)
            reason = "null digest?!";
        else if (fetch->mask_offset != (int)pd->cd->mask_size)
            reason = "premature end of digest?!";
        else if (!peerDigestUseful(pd))
            reason = "useless digest";
        else
            reason = no_bug = "success";
    }

    /* finish if we have a reason */
    if (reason) {
        const int level = strstr(reason, "?!") ? 1 : 3;
        debugs(72, level, "" << step_name << ": peer " << host << ", exiting after '" << reason << "'");
        peerDigestReqFinish(fetch, buf,
                            1, pdcb_valid, pcb_valid, reason, !no_bug);
    } else {
        /* paranoid check */
        assert(pdcb_valid && pcb_valid);
    }

    return reason != NULL;
}

/* call this when all callback data is valid and fetch must be stopped but
 * no error has occurred (e.g. we received 304 reply and reuse old digest) */
static void
peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchStop: peer " << fetch->pd->host << ", reason: " << reason);
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 0);
}

/* call this when all callback data is valid but something bad happened */
static void
peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchAbort: peer " << fetch->pd->host << ", reason: " << reason);
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1);
}

/* complete the digest transfer, update stats, unlock/release everything */
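/* fcb_valid, pdcb_valid and pcb_valid say whether the fetch, peer digest,
 * and peer cbdata (respectively) are still valid and safe to use */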
static void
peerDigestReqFinish(DigestFetchState * fetch, char *buf,
                    int fcb_valid, int pdcb_valid, int pcb_valid,
                    const char *reason, int err)
{
    assert(reason);

    /* must go before peerDigestPDFinish */

    if (pdcb_valid) {
        fetch->pd->flags.requested = false;
        fetch->pd->req_result = reason;
    }

    /* schedule next check if peer is still out there */
    if (pcb_valid) {
        PeerDigest *pd = fetch->pd;

        if (err) {
            pd->times.retry_delay = peerDigestIncDelay(pd);
            peerDigestSetCheck(pd, pd->times.retry_delay);
        } else {
            pd->times.retry_delay = 0;
            peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry));
        }
    }

    /* note: order is significant */
    if (fcb_valid)
        peerDigestFetchSetStats(fetch);

    if (pdcb_valid)
        peerDigestPDFinish(fetch, pcb_valid, err);

    if (fcb_valid)
        peerDigestFetchFinish(fetch, err);
}

/* destroys digest if peer disappeared
 * must be called only when fetch and pd cbdata are valid */
static void
peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err)
{
    PeerDigest *pd = fetch->pd;
    const char *host = pd->host.termedBuf();

    pd->times.received = squid_curtime;
    pd->times.req_delay = fetch->resp_time;
    kb_incr(&pd->stats.sent.kbytes, (size_t) fetch->sent.bytes);
    kb_incr(&pd->stats.recv.kbytes, (size_t) fetch->recv.bytes);
    pd->stats.sent.msgs += fetch->sent.msg;
    pd->stats.recv.msgs += fetch->recv.msg;

    if (err) {
        debugs(72, DBG_IMPORTANT, "" << (pcb_valid ? "temporary " : "" ) << "disabling (" << pd->req_result << ") digest from " << host);

        if (pd->cd) {
            cacheDigestDestroy(pd->cd);
            pd->cd = NULL;
        }

        pd->flags.usable = false;

        if (!pcb_valid)
            peerDigestNotePeerGone(pd);
    } else {
        assert(pcb_valid);

        pd->flags.usable = true;

        /* XXX: ugly condition, but how? */

        if (fetch->entry->store_status == STORE_OK)
            debugs(72, 2, "re-used old digest from " << host);
        else
            debugs(72, 2, "received valid digest from " << host);
    }

    cbdataReferenceDone(fetch->pd);
}

/* free fetch state structures
 * must be called only when fetch cbdata is valid */
static void
peerDigestFetchFinish(DigestFetchState * fetch, int err)
{
    assert(fetch->entry && fetch->request);

    if (fetch->old_entry) {
        debugs(72, 3, "peerDigestFetchFinish: deleting old entry");
        storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
        fetch->old_entry->releaseRequest();
        fetch->old_entry->unlock();
        fetch->old_entry = NULL;
    }

    /* update global stats */
    kb_incr(&statCounter.cd.kbytes_sent, (size_t) fetch->sent.bytes);

    kb_incr(&statCounter.cd.kbytes_recv, (size_t) fetch->recv.bytes);

    statCounter.cd.msgs_sent += fetch->sent.msg;

    statCounter.cd.msgs_recv += fetch->recv.msg;

    /* unlock everything */
    storeUnregister(fetch->sc, fetch->entry, fetch);

    fetch->entry->unlock();

    HTTPMSGUNLOCK(fetch->request);

    fetch->entry = NULL;

    assert(fetch->pd == NULL);

    cbdataFree(fetch);
}

/* calculate fetch stats after completion */
static void
peerDigestFetchSetStats(DigestFetchState * fetch)
{
    MemObject *mem;
    assert(fetch->entry && fetch->request);

    mem = fetch->entry->mem_obj;
    assert(mem);

    /* XXX: outgoing numbers are not precise */
    /* XXX: we must distinguish between 304 hits and misses here */
    fetch->sent.bytes = fetch->request->prefixLen();
    /* XXX: this is slightly wrong: we don't KNOW that the entire memobject
     * was fetched. We only know how big it is
     */
    fetch->recv.bytes = mem->size();
    fetch->sent.msg = fetch->recv.msg = 1;
    fetch->expires = fetch->entry->expires;
    fetch->resp_time = squid_curtime - fetch->start_time;

    debugs(72, 3, "peerDigestFetchFinish: recv " << fetch->recv.bytes <<
           " bytes in " << (int) fetch->resp_time << " secs");

    debugs(72, 3, "peerDigestFetchFinish: expires: " <<
           (long int) fetch->expires << " (" << std::showpos <<
           (int) (fetch->expires - squid_curtime) << "), lmt: " <<
           std::noshowpos << (long int) fetch->entry->lastmod << " (" <<
           std::showpos << (int) (fetch->entry->lastmod - squid_curtime) <<
           ")");

}

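/* parse and validate the received control block; (re)creates pd->cd as needed;
 * returns 1 on success, 0 on error */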
static int
peerDigestSetCBlock(PeerDigest * pd, const char *buf)
{
    StoreDigestCBlock cblock;
    int freed_size = 0;
    const char *host = pd->host.termedBuf();

    memcpy(&cblock, buf, sizeof(cblock));
    /* network -> host conversions */
    cblock.ver.current = ntohs(cblock.ver.current);
    cblock.ver.required = ntohs(cblock.ver.required);
    cblock.capacity = ntohl(cblock.capacity);
    cblock.count = ntohl(cblock.count);
    cblock.del_count = ntohl(cblock.del_count);
    cblock.mask_size = ntohl(cblock.mask_size);
    debugs(72, 2, "got digest cblock from " << host << "; ver: " <<
           (int) cblock.ver.current << " (req: " << (int) cblock.ver.required <<
           ")");

    debugs(72, 2, "\t size: " <<
           cblock.mask_size << " bytes, e-cnt: " <<
           cblock.count << ", e-util: " <<
           xpercentInt(cblock.count, cblock.capacity) << "%" );
    /* check version requirements (both ways) */

    if (cblock.ver.required > CacheDigestVer.current) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest requires version " <<
               cblock.ver.required << "; have: " << CacheDigestVer.current);

        return 0;
    }

    if (cblock.ver.current < CacheDigestVer.required) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest is version " <<
               cblock.ver.current << "; we require: " <<
               CacheDigestVer.required);

        return 0;
    }

    /* check consistency */
    if (cblock.ver.required > cblock.ver.current ||
            cblock.mask_size <= 0 || cblock.capacity <= 0 ||
            cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) {
        debugs(72, DBG_CRITICAL, "" << host << " digest cblock is corrupted.");
        return 0;
    }

    /* check consistency further */
    if ((size_t)cblock.mask_size != cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
        debugs(72, DBG_CRITICAL, host << " digest cblock is corrupted " <<
               "(mask size mismatch: " << cblock.mask_size << " ? " <<
               cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)
               << ").");
        return 0;
    }

    /* there are some things we cannot do yet */
    if (cblock.hash_func_count != CacheDigestHashFuncCount) {
        debugs(72, DBG_CRITICAL, "" << host << " digest: unsupported #hash functions: " <<
               cblock.hash_func_count << " ? " << CacheDigestHashFuncCount << ".");
        return 0;
    }

    /*
     * no cblock bugs below this point
     */
    /* check size changes */
    if (pd->cd && cblock.mask_size != (ssize_t)pd->cd->mask_size) {
        debugs(72, 2, host << " digest changed size: " << cblock.mask_size <<
               " -> " << pd->cd->mask_size);
        freed_size = pd->cd->mask_size;
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    if (!pd->cd) {
        debugs(72, 2, "creating " << host << " digest; size: " << cblock.mask_size << " (" <<
               std::showpos << (int) (cblock.mask_size - freed_size) << ") bytes");
        pd->cd = cacheDigestCreate(cblock.capacity, cblock.bits_per_entry);

        if (cblock.mask_size >= freed_size)
            kb_incr(&statCounter.cd.memory, cblock.mask_size - freed_size);
    }

    assert(pd->cd);
    /* these assignments leave us in an inconsistent state until we finish reading the digest */
    pd->cd->count = cblock.count;
    pd->cd->del_count = cblock.del_count;
    return 1;
}

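/* a digest with too many bits set would yield too many false hits; refuse to use it */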
static int
peerDigestUseful(const PeerDigest * pd)
{
    /* TODO: we should calculate the prob of a false hit instead of bit util */
    const int bit_util = cacheDigestBitUtil(pd->cd);

    if (bit_util > 65) {
        debugs(72, DBG_CRITICAL, "Warning: " << pd->host <<
               " peer digest has too many bits on (" << bit_util << "%).");

        return 0;
    }

    return 1;
}

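/* suppress wildly implausible time differences in the report below */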
static int
saneDiff(time_t diff)
{
    return abs((int) diff) > squid_curtime / 2 ? 0 : diff;
}

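/* append a cache manager report about this peer digest to the given store entry */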
void
peerDigestStatsReport(const PeerDigest * pd, StoreEntry * e)
{
#define f2s(flag) (pd->flags.flag ? "yes" : "no")
#define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
    ""#tm, (long int)pd->times.tm, \
    saneDiff(pd->times.tm - squid_curtime), \
    saneDiff(pd->times.tm - pd->times.initialized))

    assert(pd);

    const char *host = pd->host.termedBuf();
    storeAppendPrintf(e, "\npeer digest from %s\n", host);

    cacheDigestGuessStatsReport(&pd->stats.guess, e, host);

    storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n");
    appendTime(initialized);
    appendTime(needed);
    appendTime(requested);
    appendTime(received);
    appendTime(next_check);

    storeAppendPrintf(e, "peer digest state:\n");
    storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n",
                      f2s(needed), f2s(usable), f2s(requested));
    storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n",
                      (int) pd->times.retry_delay);
    storeAppendPrintf(e, "\tlast request response time: %d secs\n",
                      (int) pd->times.req_delay);
    storeAppendPrintf(e, "\tlast request result: %s\n",
                      pd->req_result ? pd->req_result : "(none)");

    storeAppendPrintf(e, "\npeer digest traffic:\n");
    storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n",
                      pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb);
    storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n",
                      pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb);

    storeAppendPrintf(e, "\npeer digest structure:\n");

    if (pd->cd)
        cacheDigestReport(pd->cd, host, e);
    else
        storeAppendPrintf(e, "\tno in-memory copy\n");
}

#endif