]> git.ipfire.org Git - thirdparty/squid.git/blob - src/peer_digest.cc
Merged from trunk rev.14181
[thirdparty/squid.git] / src / peer_digest.cc
1 /*
2 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 72 Peer Digest Routines */
10
11 #include "squid.h"
12 #if USE_CACHE_DIGESTS
13 #include "CacheDigest.h"
14 #include "CachePeer.h"
15 #include "event.h"
16 #include "FwdState.h"
17 #include "globals.h"
18 #include "HttpReply.h"
19 #include "HttpRequest.h"
20 #include "internal.h"
21 #include "MemObject.h"
22 #include "mime_header.h"
23 #include "neighbors.h"
24 #include "PeerDigest.h"
25 #include "SquidTime.h"
26 #include "Store.h"
27 #include "store_key_md5.h"
28 #include "StoreClient.h"
29 #include "tools.h"
30 #include "util.h"
31
32 /* local types */
33
34 /* local prototypes */
35 static time_t peerDigestIncDelay(const PeerDigest * pd);
36 static time_t peerDigestNewDelay(const StoreEntry * e);
37 static void peerDigestSetCheck(PeerDigest * pd, time_t delay);
38 static void peerDigestClean(PeerDigest *);
39 static EVH peerDigestCheck;
40 static void peerDigestRequest(PeerDigest * pd);
41 static STCB peerDigestHandleReply;
42 static int peerDigestFetchReply(void *, char *, ssize_t);
43 int peerDigestSwapInHeaders(void *, char *, ssize_t);
44 int peerDigestSwapInCBlock(void *, char *, ssize_t);
45 int peerDigestSwapInMask(void *, char *, ssize_t);
46 static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
47 static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason);
48 static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason);
49 static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err);
50 static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err);
51 static void peerDigestFetchFinish(DigestFetchState * fetch, int err);
52 static void peerDigestFetchSetStats(DigestFetchState * fetch);
53 static int peerDigestSetCBlock(PeerDigest * pd, const char *buf);
54 static int peerDigestUseful(const PeerDigest * pd);
55
56 /* local constants */
57 Version const CacheDigestVer = { 5, 3 };
58
59 #define StoreDigestCBlockSize sizeof(StoreDigestCBlock)
60
61 /* min interval for requesting digests from a given peer */
62 static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */
63 /* min interval for requesting digests (cumulative request stream) */
64 static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */
65
66 /* local vars */
67
68 static time_t pd_last_req_time = 0; /* last call to Check */
69
70 /* initialize peer digest */
static void
peerDigestInit(PeerDigest * pd, CachePeer * p)
{
    assert(pd && p);

    // NOTE(review): memset() over a non-POD object; presumably safe only
    // because the members assigned below are re-initialized afterwards --
    // confirm PeerDigest has no members that a zero-fill would corrupt.
    memset(pd, 0, sizeof(*pd));
    /*
     * DPW 2007-04-12
     * Lock on to the peer here. The corresponding cbdataReferenceDone()
     * is in peerDigestDestroy().
     */
    pd->peer = cbdataReference(p);
    /* if peer disappears, we will know it's name */
    pd->host = p->host;

    pd->times.initialized = squid_curtime;
}
88
89 static void
90 peerDigestClean(PeerDigest * pd)
91 {
92 assert(pd);
93
94 if (pd->cd)
95 cacheDigestDestroy(pd->cd);
96
97 pd->host.clean();
98 }
99
100 CBDATA_CLASS_INIT(PeerDigest);
101
102 CBDATA_CLASS_INIT(DigestFetchState);
103
/// Prepares a digest fetch: takes a cbdata reference on the PeerDigest and
/// an HTTPMSG lock on the request; store entries/clients are attached later
/// by peerDigestRequest(). I/O counters and the read buffer start zeroed.
DigestFetchState::DigestFetchState(PeerDigest *aPd, HttpRequest *req) :
    pd(cbdataReference(aPd)),
    entry(NULL),
    old_entry(NULL),
    sc(NULL),
    old_sc(NULL),
    request(req),
    offset(0),
    mask_offset(0),
    start_time(squid_curtime),
    resp_time(0),
    expires(0),
    bufofs(0),
    state(DIGEST_READ_REPLY)
{
    HTTPMSGLOCK(request);

    // per-fetch traffic accounting, folded into PeerDigest stats on finish
    sent.msg = 0;
    sent.bytes = 0;

    recv.msg = 0;
    recv.bytes = 0;

    *buf = 0;
}
129
DigestFetchState::~DigestFetchState()
{
    /* unlock everything */
    storeUnregister(sc, entry, this);

    // NOTE(review): entry is dereferenced unconditionally -- presumably a
    // fetch is only destroyed after peerDigestRequest() attached an entry;
    // verify no early-destruction path leaves entry NULL.
    entry->unlock("DigestFetchState destructed");
    entry = NULL;

    HTTPMSGUNLOCK(request);

    // the pd cbdata reference must already be released (peerDigestPDFinish)
    assert(pd == NULL);
}
142
143 /* allocate new peer digest, call Init, and lock everything */
144 PeerDigest *
145 peerDigestCreate(CachePeer * p)
146 {
147 PeerDigest *pd;
148 assert(p);
149
150 pd = new PeerDigest;
151 peerDigestInit(pd, p);
152
153 /* XXX This does not look right, and the same thing again in the caller */
154 return cbdataReference(pd);
155 }
156
157 /* call Clean and free/unlock everything */
static void
peerDigestDestroy(PeerDigest * pd)
{
    void *p;
    assert(pd);
    // copy first: cbdataReferenceValidDone() zeroes the pointer it is given
    void * peerTmp = pd->peer;

    /*
     * DPW 2007-04-12
     * We locked the peer in peerDigestInit(), this is
     * where we unlock it. If the peer is still valid,
     * tell it that the digest is gone.
     */
    if (cbdataReferenceValidDone(peerTmp, &p))
        peerNoteDigestGone((CachePeer *)p);

    // free the digest contents, then the object itself
    peerDigestClean(pd);

    delete pd;
}
178
179 /* called by peer to indicate that somebody actually needs this digest */
180 void
181 peerDigestNeeded(PeerDigest * pd)
182 {
183 assert(pd);
184 assert(!pd->flags.needed);
185 assert(!pd->cd);
186
187 pd->flags.needed = true;
188 pd->times.needed = squid_curtime;
189 peerDigestSetCheck(pd, 0); /* check asap */
190 }
191
192 /* currently we do not have a reason to disable without destroying */
193 #if FUTURE_CODE
194 /* disables peer for good */
static void
peerDigestDisable(PeerDigest * pd)
{
    // dead code: compiled only under FUTURE_CODE (see enclosing #if)
    debugs(72, 2, "peerDigestDisable: peer " << pd->host.buf() << " disabled for good");
    pd->times.disabled = squid_curtime;
    pd->times.next_check = -1; /* never */
    pd->flags.usable = 0;

    if (pd->cd) {
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    /* we do not destroy the pd itself to preserve its "history" and stats */
}
210
211 #endif
212
213 /* increment retry delay [after an unsuccessful attempt] */
214 static time_t
215 peerDigestIncDelay(const PeerDigest * pd)
216 {
217 assert(pd);
218 return pd->times.retry_delay > 0 ?
219 2 * pd->times.retry_delay : /* exponential backoff */
220 PeerDigestReqMinGap; /* minimal delay */
221 }
222
223 /* artificially increases Expires: setting to avoid race conditions
224 * returns the delay till that [increased] expiration time */
225 static time_t
226 peerDigestNewDelay(const StoreEntry * e)
227 {
228 assert(e);
229
230 if (e->expires > 0)
231 return e->expires + PeerDigestReqMinGap - squid_curtime;
232
233 return PeerDigestReqMinGap;
234 }
235
236 /* registers next digest verification */
237 static void
238 peerDigestSetCheck(PeerDigest * pd, time_t delay)
239 {
240 eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
241 pd->times.next_check = squid_curtime + delay;
242 debugs(72, 3, "peerDigestSetCheck: will check peer " << pd->host << " in " << delay << " secs");
243 }
244
245 /*
246 * called when peer is about to disappear or have already disappeared
247 */
248 void
249 peerDigestNotePeerGone(PeerDigest * pd)
250 {
251 if (pd->flags.requested) {
252 debugs(72, 2, "peerDigest: peer " << pd->host << " gone, will destroy after fetch.");
253 /* do nothing now, the fetching chain will notice and take action */
254 } else {
255 debugs(72, 2, "peerDigest: peer " << pd->host << " is gone, destroying now.");
256 peerDigestDestroy(pd);
257 }
258 }
259
260 /* callback for eventAdd() (with peer digest locked)
261 * request new digest if our copy is too old or if we lack one;
262 * schedule next check otherwise */
static void
peerDigestCheck(void *data)
{
    PeerDigest *pd = (PeerDigest *)data;
    time_t req_time;

    assert(!pd->flags.requested);

    pd->times.next_check = 0; /* unknown */

    // the CachePeer may have disappeared since this event was scheduled
    if (!cbdataReferenceValid(pd->peer)) {
        peerDigestNotePeerGone(pd);
        return;
    }

    debugs(72, 3, "peerDigestCheck: peer " << pd->peer->host << ":" << pd->peer->http_port);
    debugs(72, 3, "peerDigestCheck: time: " << squid_curtime <<
           ", last received: " << (long int) pd->times.received << " (" <<
           std::showpos << (int) (squid_curtime - pd->times.received) << ")");

    /* decide when we should send the request:
     * request now unless too close to other requests */
    req_time = squid_curtime;

    /* per-peer limit */

    if (req_time - pd->times.received < PeerDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close peer requests (" <<
               (int) (req_time - pd->times.received) << " < " <<
               (int) PeerDigestReqMinGap << " secs).");

        req_time = pd->times.received + PeerDigestReqMinGap;
    }

    /* global limit */
    if (req_time - pd_last_req_time < GlobDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close requests (" <<
               (int) (req_time - pd_last_req_time) << " < " <<
               (int) GlobDigestReqMinGap << " secs).");

        req_time = pd_last_req_time + GlobDigestReqMinGap;
    }

    // either fetch now or re-schedule this check for the earliest legal time
    if (req_time <= squid_curtime)
        peerDigestRequest(pd); /* will set pd->flags.requested */
    else
        peerDigestSetCheck(pd, req_time - squid_curtime);
}
313
314 /* ask store for a digest */
static void
peerDigestRequest(PeerDigest * pd)
{
    CachePeer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url = NULL;
    const cache_key *key;
    HttpRequest *req;
    StoreIOBuffer tempBuffer;

    pd->req_result = NULL;
    pd->flags.requested = true;

    /* compute future request components */

    // a peer may advertise a custom digest URL; otherwise use the
    // well-known internal periodic object
    if (p->digest_url)
        url = xstrdup(p->digest_url);
    else
        url = xstrdup(internalRemoteUri(p->host, p->http_port, "/squid-internal-periodic/", SBuf(StoreDigestFileName)));

    req = HttpRequest::CreateFromUrl(url);

    assert(req);

    key = storeKeyPublicByRequest(req);

    debugs(72, 2, "peerDigestRequest: " << url << " key: " << storeKeyText(key));

    /* add custom headers */
    assert(!req->header.len);

    req->header.putStr(HDR_ACCEPT, StoreDigestMimeStr);

    req->header.putStr(HDR_ACCEPT, "text/html");

    // forward configured peer credentials, except the pass-through modes
    if (p->login &&
            p->login[0] != '*' &&
            strcmp(p->login, "PASS") != 0 &&
            strcmp(p->login, "PASSTHRU") != 0 &&
            strcmp(p->login, "NEGOTIATE") != 0 &&
            strcmp(p->login, "PROXYPASS") != 0) {
        req->url.userInfo(SBuf(p->login)); // XXX: performance regression make peer login SBuf as well.
    }
    /* create fetch state structure */
    DigestFetchState *fetch = new DigestFetchState(pd, req);

    /* update timestamps */
    pd->times.requested = squid_curtime;
    pd_last_req_time = squid_curtime;
    req->flags.cachable = true;

    /* the rest is based on clientProcessExpired() */
    req->flags.refresh = true;

    // keep any previously cached digest around so a 304 can revalidate it
    old_e = fetch->old_entry = Store::Root().get(key);

    if (old_e) {
        debugs(72, 5, "peerDigestRequest: found old entry");

        old_e->lock("peerDigestRequest");
        old_e->createMemObject(url, url, req->method);

        fetch->old_sc = storeClientListAdd(old_e, fetch);
    }

    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    fetch->sc = storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */

    if (old_e)
        e->lastmod = old_e->lastmod;

    /* push towards peer cache */
    debugs(72, 3, "peerDigestRequest: forwarding to fwdStart...");

    FwdState::fwdStart(Comm::ConnectionPointer(), e, req);

    // start copying the reply into fetch->buf; peerDigestHandleReply
    // drives the rest of the transfer
    tempBuffer.offset = 0;

    tempBuffer.length = SM_PAGE_SIZE;

    tempBuffer.data = fetch->buf;

    storeClientCopy(fetch->sc, e, tempBuffer,
                    peerDigestHandleReply, fetch);

    safe_free(url);
}
404
405 /* Handle the data copying .. */
406
407 /*
408 * This routine handles the copy data and then redirects the
409 * copy to a bunch of subfunctions depending upon the copy state.
410 * It also tracks the buffer offset and "seen", since I'm actually
411 * not interested in rewriting everything to suit my little idea.
412 */
static void
peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    int retsize = -1;
    digest_read_state_t prevstate;
    int newsize;

    assert(fetch->pd && receivedData.data);
    /* The existing code assumes that the received pointer is
     * where we asked the data to be put
     */
    assert(fetch->buf + fetch->bufofs == receivedData.data);

    /* Update the buffer size */
    fetch->bufofs += receivedData.length;

    assert(fetch->bufofs <= SM_PAGE_SIZE);

    /* If we've fetched enough, return */

    if (peerDigestFetchedEnough(fetch, fetch->buf, fetch->bufofs, "peerDigestHandleReply"))
        return;

    /* Call the right function based on the state */
    /* (Those functions will update the state if needed) */

    /* Give us a temporary reference. Some of the calls we make may
     * try to destroy the fetch structure, and we like to know if they
     * do
     */
    CbcPointer<DigestFetchState> tmpLock = fetch;

    /* Repeat this loop until we're out of data OR the state changes */
    /* (So keep going if the state has changed and we still have data */
    do {
        prevstate = fetch->state;

        // each step consumes a prefix of buf and returns how many bytes
        // it used (0 = need more data, <0 = error/finished, abort loop)
        switch (fetch->state) {

        case DIGEST_READ_REPLY:
            retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_HEADERS:
            retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_CBLOCK:
            retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_MASK:
            retsize = peerDigestSwapInMask(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_NONE:
            break;

        case DIGEST_READ_DONE:
            return;
            break;

        default:
            fatal("Bad digest transfer mode!\n");
        }

        if (retsize < 0)
            return;

        /*
         * The returned size indicates how much of the buffer was read -
         * so move the remainder of the buffer to the beginning
         * and update the bufofs / bufsize
         */
        newsize = fetch->bufofs - retsize;

        memmove(fetch->buf, fetch->buf + retsize, fetch->bufofs - newsize);

        fetch->bufofs = newsize;

    } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);

    /* Update the copy offset */
    fetch->offset += receivedData.length;

    /* Schedule another copy */
    // only if a step above did not destroy the fetch structure
    if (cbdataReferenceValid(fetch)) {
        StoreIOBuffer tempBuffer;
        tempBuffer.offset = fetch->offset;
        tempBuffer.length = SM_PAGE_SIZE - fetch->bufofs;
        tempBuffer.data = fetch->buf + fetch->bufofs;
        storeClientCopy(fetch->sc, fetch->entry, tempBuffer,
                        peerDigestHandleReply, fetch);
    }
}
509
510 /* wait for full http headers to be received then parse them */
511 /*
512 * This routine handles parsing the reply line.
513 * If the reply line indicates an OK, the same data is thrown
514 * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
515 * we simply stop parsing.
516 */
static int
peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd = fetch->pd;
    size_t hdr_size;
    assert(pd && buf);
    assert(!fetch->offset);

    assert(fetch->state == DIGEST_READ_REPLY);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
        return -1;

    // wait until a complete header block has accumulated in buf
    if ((hdr_size = headersEnd(buf, size))) {
        HttpReply const *reply = fetch->entry->getReply();
        assert(reply);
        assert(reply->sline.status() != Http::scNone);
        const Http::StatusCode status = reply->sline.status();
        debugs(72, 3, "peerDigestFetchReply: " << pd->host << " status: " << status <<
               ", expires: " << (long int) reply->expires << " (" << std::showpos <<
               (int) (reply->expires - squid_curtime) << ")");

        /* this "if" is based on clientHandleIMSReply() */

        if (status == Http::scNotModified) {
            /* our old entry is fine */
            assert(fetch->old_entry);

            if (!fetch->old_entry->mem_obj->request) {
                fetch->old_entry->mem_obj->request = fetch->entry->mem_obj->request;
                HTTPMSGLOCK(fetch->old_entry->mem_obj->request);
            }

            assert(fetch->old_entry->mem_obj->request);

            // refresh the cached reply's headers from the 304
            HttpReply *old_rep = (HttpReply *) fetch->old_entry->getReply();

            old_rep->updateOnNotModified(reply);

            fetch->old_entry->timestampsSet();

            /* get rid of 304 reply */
            storeUnregister(fetch->sc, fetch->entry, fetch);

            fetch->entry->unlock("peerDigestFetchReply 304");

            // continue the fetch against the revalidated old entry
            fetch->entry = fetch->old_entry;

            fetch->old_entry = NULL;

            /* preserve request -- we need its size to update counters */
            /* requestUnlink(r); */
            /* fetch->entry->mem_obj->request = NULL; */
        } else if (status == Http::scOkay) {
            /* get rid of old entry if any */

            if (fetch->old_entry) {
                debugs(72, 3, "peerDigestFetchReply: got new digest, releasing old one");
                storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
                fetch->old_entry->releaseRequest();
                fetch->old_entry->unlock("peerDigestFetchReply 200");
                fetch->old_entry = NULL;
            }
        } else {
            /* some kind of a bug */
            peerDigestFetchAbort(fetch, buf, reply->sline.reason());
            return -1; /* XXX -1 will abort stuff in ReadReply! */
        }

        /* must have a ready-to-use store entry if we got here */
        /* can we stay with the old in-memory digest? */
        if (status == Http::scNotModified && fetch->pd->cd) {
            peerDigestFetchStop(fetch, buf, "Not modified");
            fetch->state = DIGEST_READ_DONE;
        } else {
            fetch->state = DIGEST_READ_HEADERS;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "reply header too big");
    }

    /* We don't want to actually ack that we've handled anything,
     * otherwise SwapInHeaders() won't get the reply line .. */
    return 0;
}
606
607 /* fetch headers from disk, pass on to SwapInCBlock */
int
peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    size_t hdr_size;

    assert(fetch->state == DIGEST_READ_HEADERS);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
        return -1;

    assert(!fetch->offset);

    // the store entry replays its own header block; consume it and
    // hand the rest of the body to the cblock reader
    if ((hdr_size = headersEnd(buf, size))) {
        assert(fetch->entry->getReply());
        assert(fetch->entry->getReply()->sline.status() != Http::scNone);

        // only a 200 can be stored here; anything else is an internal error
        if (fetch->entry->getReply()->sline.status() != Http::scOkay) {
            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
                   " status " << fetch->entry->getReply()->sline.status() <<
                   " got cached!");

            peerDigestFetchAbort(fetch, buf, "internal status error");
            return -1;
        }

        fetch->state = DIGEST_READ_CBLOCK;
        return hdr_size; /* Say how much data we read */
    }

    /* need more data, do we have space? */
    if (size >= SM_PAGE_SIZE) {
        peerDigestFetchAbort(fetch, buf, "stored header too big");
        return -1;
    }

    return 0; /* We need to read more to parse .. */
}
646
int
peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;

    assert(fetch->state == DIGEST_READ_CBLOCK);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock"))
        return -1;

    // wait until the fixed-size control block is fully buffered
    if (size >= (ssize_t)StoreDigestCBlockSize) {
        PeerDigest *pd = fetch->pd;

        assert(pd && fetch->entry->getReply());

        // validates the cblock and (re)allocates pd->cd accordingly
        if (peerDigestSetCBlock(pd, buf)) {
            /* XXX: soon we will have variable header size */
            /* switch to CD buffer and fetch digest guts */
            buf = NULL;
            assert(pd->cd->mask);
            fetch->state = DIGEST_READ_MASK;
            return StoreDigestCBlockSize;
        } else {
            peerDigestFetchAbort(fetch, buf, "invalid digest cblock");
            return -1;
        }
    }

    /* need more data, do we have space? */
    if (size >= SM_PAGE_SIZE) {
        peerDigestFetchAbort(fetch, buf, "digest cblock too big");
        return -1;
    }

    return 0; /* We need more data */
}
683
684 int
685 peerDigestSwapInMask(void *data, char *buf, ssize_t size)
686 {
687 DigestFetchState *fetch = (DigestFetchState *)data;
688 PeerDigest *pd;
689
690 pd = fetch->pd;
691 assert(pd->cd && pd->cd->mask);
692
693 /*
694 * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore!
695 * we need to do the copy ourselves!
696 */
697 memcpy(pd->cd->mask + fetch->mask_offset, buf, size);
698
699 /* NOTE! buf points to the middle of pd->cd->mask! */
700
701 if (peerDigestFetchedEnough(fetch, NULL, size, "peerDigestSwapInMask"))
702 return -1;
703
704 fetch->mask_offset += size;
705
706 if (fetch->mask_offset >= pd->cd->mask_size) {
707 debugs(72, 2, "peerDigestSwapInMask: Done! Got " <<
708 fetch->mask_offset << ", expected " << pd->cd->mask_size);
709 assert(fetch->mask_offset == pd->cd->mask_size);
710 assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask"));
711 return -1; /* XXX! */
712 }
713
714 /* We always read everything, so return size */
715 return size;
716 }
717
static int
peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name)
{
    PeerDigest *pd = NULL;
    const char *host = "<unknown>";  /* peer host */
    const char *reason = NULL;  /* reason for completion */
    const char *no_bug = NULL;  /* successful completion if set */
    const int pdcb_valid = cbdataReferenceValid(fetch->pd);
    // NOTE(review): fetch->pd->peer is dereferenced here even though the
    // checks below allow for fetch->pd being NULL -- presumably pd is
    // never actually NULL at this point; confirm before relying on it.
    const int pcb_valid = cbdataReferenceValid(fetch->pd->peer);

    /* test possible exiting conditions (the same for most steps!)
     * cases marked with '?!' should not happen */

    if (!reason) {
        if (!(pd = fetch->pd))
            reason = "peer digest disappeared?!";

#if DONT            /* WHY NOT? /HNO */

        else if (!cbdataReferenceValid(pd))
            reason = "invalidated peer digest?!";

#endif

        else
            host = pd->host.termedBuf();
    }

    debugs(72, 6, step_name << ": peer " << host << ", offset: " <<
           fetch->offset << " size: " << size << ".");

    /* continue checking (with pd and host known and valid) */

    if (!reason) {
        if (!cbdataReferenceValid(pd->peer))
            reason = "peer disappeared";
        else if (size < 0)
            reason = "swap failure";
        else if (!fetch->entry)
            reason = "swap aborted?!";
        else if (EBIT_TEST(fetch->entry->flags, ENTRY_ABORTED))
            reason = "swap aborted";
    }

    /* continue checking (maybe-successful eof case) */
    // size == 0 signals end-of-data: success only if the whole mask
    // arrived and the resulting digest is worth keeping
    if (!reason && !size) {
        if (!pd->cd)
            reason = "null digest?!";
        else if (fetch->mask_offset != (int)pd->cd->mask_size)
            reason = "premature end of digest?!";
        else if (!peerDigestUseful(pd))
            reason = "useless digest";
        else
            reason = no_bug = "success";
    }

    /* finish if we have a reason */
    if (reason) {
        const int level = strstr(reason, "?!") ? 1 : 3;
        debugs(72, level, "" << step_name << ": peer " << host << ", exiting after '" << reason << "'");
        peerDigestReqFinish(fetch, buf,
                            1, pdcb_valid, pcb_valid, reason, !no_bug);
    } else {
        /* paranoid check */
        assert(pdcb_valid && pcb_valid);
    }

    return reason != NULL;
}
787
788 /* call this when all callback data is valid and fetch must be stopped but
789 * no error has occurred (e.g. we received 304 reply and reuse old digest) */
790 static void
791 peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason)
792 {
793 assert(reason);
794 debugs(72, 2, "peerDigestFetchStop: peer " << fetch->pd->host << ", reason: " << reason);
795 peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 0);
796 }
797
798 /* call this when all callback data is valid but something bad happened */
799 static void
800 peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason)
801 {
802 assert(reason);
803 debugs(72, 2, "peerDigestFetchAbort: peer " << fetch->pd->host << ", reason: " << reason);
804 peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1);
805 }
806
807 /* complete the digest transfer, update stats, unlock/release everything */
static void
peerDigestReqFinish(DigestFetchState * fetch, char *buf,
                    int fcb_valid, int pdcb_valid, int pcb_valid,
                    const char *reason, int err)
{
    assert(reason);

    /* must go before peerDigestPDFinish */

    if (pdcb_valid) {
        fetch->pd->flags.requested = false;
        fetch->pd->req_result = reason;
    }

    /* schedule next check if peer is still out there */
    if (pcb_valid) {
        PeerDigest *pd = fetch->pd;

        // back off exponentially on failure; follow Expires on success
        if (err) {
            pd->times.retry_delay = peerDigestIncDelay(pd);
            peerDigestSetCheck(pd, pd->times.retry_delay);
        } else {
            pd->times.retry_delay = 0;
            peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry));
        }
    }

    /* note: order is significant */
    // stats read fetch->entry, PDFinish releases the pd reference,
    // FetchFinish deletes fetch -- do not reorder
    if (fcb_valid)
        peerDigestFetchSetStats(fetch);

    if (pdcb_valid)
        peerDigestPDFinish(fetch, pcb_valid, err);

    if (fcb_valid)
        peerDigestFetchFinish(fetch, err);
}
845
846 /* destroys digest if peer disappeared
847 * must be called only when fetch and pd cbdata are valid */
static void
peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err)
{
    PeerDigest *pd = fetch->pd;
    const char *host = pd->host.termedBuf();

    pd->times.received = squid_curtime;
    pd->times.req_delay = fetch->resp_time;
    // fold this fetch's traffic into the digest's lifetime stats
    kb_incr(&pd->stats.sent.kbytes, (size_t) fetch->sent.bytes);
    kb_incr(&pd->stats.recv.kbytes, (size_t) fetch->recv.bytes);
    pd->stats.sent.msgs += fetch->sent.msg;
    pd->stats.recv.msgs += fetch->recv.msg;

    if (err) {
        debugs(72, DBG_IMPORTANT, "" << (pcb_valid ? "temporary " : "" ) << "disabling (" << pd->req_result << ") digest from " << host);

        if (pd->cd) {
            cacheDigestDestroy(pd->cd);
            pd->cd = NULL;
        }

        pd->flags.usable = false;

        // peer vanished during the fetch: destroy (or defer destroying) pd
        if (!pcb_valid)
            peerDigestNotePeerGone(pd);
    } else {
        assert(pcb_valid);

        pd->flags.usable = true;

        /* XXX: ugly condition, but how? */

        if (fetch->entry->store_status == STORE_OK)
            debugs(72, 2, "re-used old digest from " << host);
        else
            debugs(72, 2, "received valid digest from " << host);
    }

    // release the cbdata reference taken by the DigestFetchState ctor;
    // the dtor asserts that this happened
    cbdataReferenceDone(fetch->pd);
}
888
889 /* free fetch state structures
890 * must be called only when fetch cbdata is valid */
891 static void
892 peerDigestFetchFinish(DigestFetchState * fetch, int err)
893 {
894 assert(fetch->entry && fetch->request);
895
896 if (fetch->old_entry) {
897 debugs(72, 3, "peerDigestFetchFinish: deleting old entry");
898 storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
899 fetch->old_entry->releaseRequest();
900 fetch->old_entry->unlock("peerDigestFetchFinish old");
901 fetch->old_entry = NULL;
902 }
903
904 /* update global stats */
905 kb_incr(&statCounter.cd.kbytes_sent, (size_t) fetch->sent.bytes);
906
907 kb_incr(&statCounter.cd.kbytes_recv, (size_t) fetch->recv.bytes);
908
909 statCounter.cd.msgs_sent += fetch->sent.msg;
910
911 statCounter.cd.msgs_recv += fetch->recv.msg;
912
913 delete fetch;
914 }
915
916 /* calculate fetch stats after completion */
static void
peerDigestFetchSetStats(DigestFetchState * fetch)
{
    MemObject *mem;
    assert(fetch->entry && fetch->request);

    mem = fetch->entry->mem_obj;
    assert(mem);

    /* XXX: outgoing numbers are not precise */
    /* XXX: we must distinguish between 304 hits and misses here */
    fetch->sent.bytes = fetch->request->prefixLen();
    /* XXX: this is slightly wrong: we don't KNOW that the entire memobject
     * was fetched. We only know how big it is
     */
    fetch->recv.bytes = mem->size();
    fetch->sent.msg = fetch->recv.msg = 1;
    fetch->expires = fetch->entry->expires;
    fetch->resp_time = squid_curtime - fetch->start_time;

    debugs(72, 3, "peerDigestFetchFinish: recv " << fetch->recv.bytes <<
           " bytes in " << (int) fetch->resp_time << " secs");

    debugs(72, 3, "peerDigestFetchFinish: expires: " <<
           (long int) fetch->expires << " (" << std::showpos <<
           (int) (fetch->expires - squid_curtime) << "), lmt: " <<
           std::noshowpos << (long int) fetch->entry->lastmod << " (" <<
           std::showpos << (int) (fetch->entry->lastmod - squid_curtime) <<
           ")");

}
948
static int
peerDigestSetCBlock(PeerDigest * pd, const char *buf)
{
    StoreDigestCBlock cblock;
    int freed_size = 0;
    const char *host = pd->host.termedBuf();

    memcpy(&cblock, buf, sizeof(cblock));
    /* network -> host conversions */
    cblock.ver.current = ntohs(cblock.ver.current);
    cblock.ver.required = ntohs(cblock.ver.required);
    cblock.capacity = ntohl(cblock.capacity);
    cblock.count = ntohl(cblock.count);
    cblock.del_count = ntohl(cblock.del_count);
    cblock.mask_size = ntohl(cblock.mask_size);
    debugs(72, 2, "got digest cblock from " << host << "; ver: " <<
           (int) cblock.ver.current << " (req: " << (int) cblock.ver.required <<
           ")");

    debugs(72, 2, "\t size: " <<
           cblock.mask_size << " bytes, e-cnt: " <<
           cblock.count << ", e-util: " <<
           xpercentInt(cblock.count, cblock.capacity) << "%" );
    /* check version requirements (both ways) */

    if (cblock.ver.required > CacheDigestVer.current) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest requires version " <<
               cblock.ver.required << "; have: " << CacheDigestVer.current);

        return 0;
    }

    if (cblock.ver.current < CacheDigestVer.required) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest is version " <<
               cblock.ver.current << "; we require: " <<
               CacheDigestVer.required);

        return 0;
    }

    /* check consistency */
    if (cblock.ver.required > cblock.ver.current ||
            cblock.mask_size <= 0 || cblock.capacity <= 0 ||
            cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) {
        debugs(72, DBG_CRITICAL, "" << host << " digest cblock is corrupted.");
        return 0;
    }

    /* check consistency further */
    // the advertised mask size must match what the parameters imply
    if ((size_t)cblock.mask_size != cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
        debugs(72, DBG_CRITICAL, host << " digest cblock is corrupted " <<
               "(mask size mismatch: " << cblock.mask_size << " ? " <<
               cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)
               << ").");
        return 0;
    }

    /* there are some things we cannot do yet */
    if (cblock.hash_func_count != CacheDigestHashFuncCount) {
        debugs(72, DBG_CRITICAL, "" << host << " digest: unsupported #hash functions: " <<
               cblock.hash_func_count << " ? " << CacheDigestHashFuncCount << ".");
        return 0;
    }

    /*
     * no cblock bugs below this point
     */
    /* check size changes */
    // a resized mask invalidates the old digest; recreate it below
    if (pd->cd && cblock.mask_size != (ssize_t)pd->cd->mask_size) {
        debugs(72, 2, host << " digest changed size: " << cblock.mask_size <<
               " -> " << pd->cd->mask_size);
        freed_size = pd->cd->mask_size;
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    if (!pd->cd) {
        debugs(72, 2, "creating " << host << " digest; size: " << cblock.mask_size << " (" <<
               std::showpos << (int) (cblock.mask_size - freed_size) << ") bytes");
        pd->cd = cacheDigestCreate(cblock.capacity, cblock.bits_per_entry);

        if (cblock.mask_size >= freed_size)
            kb_incr(&statCounter.cd.memory, cblock.mask_size - freed_size);
    }

    assert(pd->cd);
    /* these assignments leave us in an inconsistent state until we finish reading the digest */
    pd->cd->count = cblock.count;
    pd->cd->del_count = cblock.del_count;
    return 1;
}
1040
1041 static int
1042 peerDigestUseful(const PeerDigest * pd)
1043 {
1044 /* TODO: we should calculate the prob of a false hit instead of bit util */
1045 const int bit_util = cacheDigestBitUtil(pd->cd);
1046
1047 if (bit_util > 65) {
1048 debugs(72, DBG_CRITICAL, "Warning: " << pd->host <<
1049 " peer digest has too many bits on (" << bit_util << "%%).");
1050
1051 return 0;
1052 }
1053
1054 return 1;
1055 }
1056
1057 static int
1058 saneDiff(time_t diff)
1059 {
1060 return abs((int) diff) > squid_curtime / 2 ? 0 : diff;
1061 }
1062
void
peerDigestStatsReport(const PeerDigest * pd, StoreEntry * e)
{
// function-local helpers: flag-to-string and one timestamp report row
#define f2s(flag) (pd->flags.flag ? "yes" : "no")
#define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
    ""#tm, (long int)pd->times.tm, \
    saneDiff(pd->times.tm - squid_curtime), \
    saneDiff(pd->times.tm - pd->times.initialized))

    assert(pd);

    const char *host = pd->host.termedBuf();
    storeAppendPrintf(e, "\npeer digest from %s\n", host);

    cacheDigestGuessStatsReport(&pd->stats.guess, e, host);

    storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n");
    appendTime(initialized);
    appendTime(needed);
    appendTime(requested);
    appendTime(received);
    appendTime(next_check);

    storeAppendPrintf(e, "peer digest state:\n");
    storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n",
                      f2s(needed), f2s(usable), f2s(requested));
    storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n",
                      (int) pd->times.retry_delay);
    storeAppendPrintf(e, "\tlast request response time: %d secs\n",
                      (int) pd->times.req_delay);
    storeAppendPrintf(e, "\tlast request result: %s\n",
                      pd->req_result ? pd->req_result : "(none)");

    storeAppendPrintf(e, "\npeer digest traffic:\n");
    storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n",
                      pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb);
    storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n",
                      pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb);

    storeAppendPrintf(e, "\npeer digest structure:\n");

    if (pd->cd)
        cacheDigestReport(pd->cd, host, e);
    else
        storeAppendPrintf(e, "\tno in-memory copy\n");
}
1109
1110 #endif
1111