/* Source: thirdparty/squid, src/peer_digest.cc (git.ipfire.org mirror) */
1 /*
2 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 72 Peer Digest Routines */
10
11 #include "squid.h"
12 #if USE_CACHE_DIGESTS
13 #include "CacheDigest.h"
14 #include "CachePeer.h"
15 #include "event.h"
16 #include "FwdState.h"
17 #include "globals.h"
18 #include "HttpReply.h"
19 #include "HttpRequest.h"
20 #include "internal.h"
21 #include "MemObject.h"
22 #include "mime_header.h"
23 #include "neighbors.h"
24 #include "PeerDigest.h"
25 #include "SquidTime.h"
26 #include "Store.h"
27 #include "store_key_md5.h"
28 #include "StoreClient.h"
29 #include "tools.h"
30 #include "util.h"
31
/* local types */

/* local prototypes */
static time_t peerDigestIncDelay(const PeerDigest * pd);
static time_t peerDigestNewDelay(const StoreEntry * e);
static void peerDigestSetCheck(PeerDigest * pd, time_t delay);
static EVH peerDigestCheck;
static void peerDigestRequest(PeerDigest * pd);
static STCB peerDigestHandleReply;
static int peerDigestFetchReply(void *, char *, ssize_t);
/* NOTE(review): the three SwapIn* steps lack `static`, unlike their siblings;
 * presumably for external visibility — confirm before making them static */
int peerDigestSwapInHeaders(void *, char *, ssize_t);
int peerDigestSwapInCBlock(void *, char *, ssize_t);
int peerDigestSwapInMask(void *, char *, ssize_t);
static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err);
static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err);
static void peerDigestFetchFinish(DigestFetchState * fetch, int err);
static void peerDigestFetchSetStats(DigestFetchState * fetch);
static int peerDigestSetCBlock(PeerDigest * pd, const char *buf);
static int peerDigestUseful(const PeerDigest * pd);

/* local constants */

/* cache digest protocol version we emit (current) and minimum we accept (required) */
Version const CacheDigestVer = { 5, 3 };

#define StoreDigestCBlockSize sizeof(StoreDigestCBlock)

/* min interval for requesting digests from a given peer */
static const time_t PeerDigestReqMinGap = 5 * 60;  /* seconds */
/* min interval for requesting digests (cumulative request stream) */
static const time_t GlobDigestReqMinGap = 1 * 60;  /* seconds */

/* local vars */

static time_t pd_last_req_time = 0; /* last call to Check */
68
/* initialize a freshly allocated peer digest and attach it to its CachePeer */
static void
peerDigestInit(PeerDigest * pd, CachePeer * p)
{
    assert(pd && p);

    /* NOTE(review): zeroing a non-POD (pd->host is a String) — relies on
     * String tolerating memset before the assignment below; confirm */
    memset(pd, 0, sizeof(*pd));
    /*
     * DPW 2007-04-12
     * Lock on to the peer here. The corresponding cbdataReferenceDone()
     * is in peerDigestDestroy().
     */
    pd->peer = cbdataReference(p);
    /* if the peer disappears, we will still know its name */
    pd->host = p->host;

    pd->times.initialized = squid_curtime;
}
87
CBDATA_CLASS_INIT(PeerDigest);

CBDATA_CLASS_INIT(DigestFetchState);

/* Build the state for one digest fetch: lock the owning PeerDigest (cbdata)
 * and the HTTP request; counters and buffers start empty, and parsing begins
 * with the HTTP reply line (DIGEST_READ_REPLY). */
DigestFetchState::DigestFetchState(PeerDigest *aPd, HttpRequest *req) :
    pd(cbdataReference(aPd)),
    entry(NULL),
    old_entry(NULL),
    sc(NULL),
    old_sc(NULL),
    request(req),
    offset(0),
    mask_offset(0),
    start_time(squid_curtime),
    resp_time(0),
    expires(0),
    bufofs(0),
    state(DIGEST_READ_REPLY)
{
    HTTPMSGLOCK(request);

    /* traffic accounting starts at zero */
    sent.msg = 0;
    sent.bytes = 0;

    recv.msg = 0;
    recv.bytes = 0;

    *buf = 0;
}
117
DigestFetchState::~DigestFetchState()
{
    /* unlock everything */
    storeUnregister(sc, entry, this);

    entry->unlock("DigestFetchState destructed");
    entry = NULL;

    HTTPMSGUNLOCK(request);

    /* the pd reference must already have been released (see
     * peerDigestPDFinish) before the fetch state is destroyed */
    assert(pd == NULL);
}
130
/* allocate new peer digest, call Init, and lock everything */
PeerDigest *
peerDigestCreate(CachePeer * p)
{
    PeerDigest *pd;
    assert(p);

    pd = new PeerDigest;
    peerDigestInit(pd, p);

    /* XXX This does not look right, and the same thing again in the caller */
    return cbdataReference(pd);
}
144
/* unlock the peer, free the in-memory digest, and delete pd itself */
static void
peerDigestDestroy(PeerDigest * pd)
{
    void *p;
    assert(pd);
    void * peerTmp = pd->peer;

    /*
     * DPW 2007-04-12
     * We locked the peer in peerDigestInit(), this is
     * where we unlock it. If the peer is still valid,
     * tell it that the digest is gone.
     */
    if (cbdataReferenceValidDone(peerTmp, &p))
        peerNoteDigestGone((CachePeer *)p);

    delete pd->cd;
    pd->host.clean();

    delete pd;
}
167
168 /* called by peer to indicate that somebody actually needs this digest */
169 void
170 peerDigestNeeded(PeerDigest * pd)
171 {
172 assert(pd);
173 assert(!pd->flags.needed);
174 assert(!pd->cd);
175
176 pd->flags.needed = true;
177 pd->times.needed = squid_curtime;
178 peerDigestSetCheck(pd, 0); /* check asap */
179 }
180
/* currently we do not have a reason to disable without destroying */
#if FUTURE_CODE
/* disables peer for good */
static void
peerDigestDisable(PeerDigest * pd)
{
    debugs(72, 2, "peerDigestDisable: peer " << pd->host.buf() << " disabled for good");
    pd->times.disabled = squid_curtime;
    pd->times.next_check = -1;  /* never */
    pd->flags.usable = 0;

    /* BUG FIX: the statement below was missing its terminating semicolon,
     * so this block would not compile if FUTURE_CODE were ever enabled */
    delete pd->cd;
    pd->cd = nullptr;

    /* we do not destroy the pd itself to preserve its "history" and stats */
}

#endif
199
200 /* increment retry delay [after an unsuccessful attempt] */
201 static time_t
202 peerDigestIncDelay(const PeerDigest * pd)
203 {
204 assert(pd);
205 return pd->times.retry_delay > 0 ?
206 2 * pd->times.retry_delay : /* exponential backoff */
207 PeerDigestReqMinGap; /* minimal delay */
208 }
209
210 /* artificially increases Expires: setting to avoid race conditions
211 * returns the delay till that [increased] expiration time */
212 static time_t
213 peerDigestNewDelay(const StoreEntry * e)
214 {
215 assert(e);
216
217 if (e->expires > 0)
218 return e->expires + PeerDigestReqMinGap - squid_curtime;
219
220 return PeerDigestReqMinGap;
221 }
222
223 /* registers next digest verification */
224 static void
225 peerDigestSetCheck(PeerDigest * pd, time_t delay)
226 {
227 eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
228 pd->times.next_check = squid_curtime + delay;
229 debugs(72, 3, "peerDigestSetCheck: will check peer " << pd->host << " in " << delay << " secs");
230 }
231
232 /*
233 * called when peer is about to disappear or have already disappeared
234 */
235 void
236 peerDigestNotePeerGone(PeerDigest * pd)
237 {
238 if (pd->flags.requested) {
239 debugs(72, 2, "peerDigest: peer " << pd->host << " gone, will destroy after fetch.");
240 /* do nothing now, the fetching chain will notice and take action */
241 } else {
242 debugs(72, 2, "peerDigest: peer " << pd->host << " is gone, destroying now.");
243 peerDigestDestroy(pd);
244 }
245 }
246
/* callback for eventAdd() (with peer digest locked)
 * request new digest if our copy is too old or if we lack one;
 * schedule next check otherwise */
static void
peerDigestCheck(void *data)
{
    PeerDigest *pd = (PeerDigest *)data;
    time_t req_time;

    assert(!pd->flags.requested);

    pd->times.next_check = 0;   /* unknown */

    /* the peer may have been removed while this event sat in the queue */
    if (!cbdataReferenceValid(pd->peer)) {
        peerDigestNotePeerGone(pd);
        return;
    }

    debugs(72, 3, "peerDigestCheck: peer " << pd->peer->host << ":" << pd->peer->http_port);
    debugs(72, 3, "peerDigestCheck: time: " << squid_curtime <<
           ", last received: " << (long int) pd->times.received << " (" <<
           std::showpos << (int) (squid_curtime - pd->times.received) << ")");

    /* decide when we should send the request:
     * request now unless too close to other requests */
    req_time = squid_curtime;

    /* per-peer limit: do not re-request this peer's digest too soon */

    if (req_time - pd->times.received < PeerDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close peer requests (" <<
               (int) (req_time - pd->times.received) << " < " <<
               (int) PeerDigestReqMinGap << " secs).");

        req_time = pd->times.received + PeerDigestReqMinGap;
    }

    /* global limit: spread digest requests across all peers over time */
    if (req_time - pd_last_req_time < GlobDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close requests (" <<
               (int) (req_time - pd_last_req_time) << " < " <<
               (int) GlobDigestReqMinGap << " secs).");

        req_time = pd_last_req_time + GlobDigestReqMinGap;
    }

    /* fetch now if allowed; otherwise reschedule this check for later */
    if (req_time <= squid_curtime)
        peerDigestRequest(pd);  /* will set pd->flags.requested */
    else
        peerDigestSetCheck(pd, req_time - squid_curtime);
}
300
/* ask store for a digest: build the request, set up old/new store entries
 * and store clients, kick off forwarding, and schedule the first copy into
 * fetch->buf (peerDigestHandleReply drives the rest) */
static void
peerDigestRequest(PeerDigest * pd)
{
    CachePeer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url = NULL;
    const cache_key *key;
    HttpRequest *req;
    StoreIOBuffer tempBuffer;

    pd->req_result = NULL;
    pd->flags.requested = true;

    /* compute future request components */

    /* a peer may override the well-known digest location via digest_url */
    if (p->digest_url)
        url = xstrdup(p->digest_url);
    else
        url = xstrdup(internalRemoteUri(p->host, p->http_port, "/squid-internal-periodic/", SBuf(StoreDigestFileName)));

    req = HttpRequest::CreateFromUrl(url);

    assert(req);

    key = storeKeyPublicByRequest(req);

    debugs(72, 2, "peerDigestRequest: " << url << " key: " << storeKeyText(key));

    /* add custom headers */
    assert(!req->header.len);

    req->header.putStr(Http::HdrType::ACCEPT, StoreDigestMimeStr);

    req->header.putStr(Http::HdrType::ACCEPT, "text/html");

    /* forward proxy credentials, except for the magic login modes that
     * are handled elsewhere (PASS/PASSTHRU/NEGOTIATE/PROXYPASS/'*') */
    if (p->login &&
            p->login[0] != '*' &&
            strcmp(p->login, "PASS") != 0 &&
            strcmp(p->login, "PASSTHRU") != 0 &&
            strncmp(p->login, "NEGOTIATE",9) != 0 &&
            strcmp(p->login, "PROXYPASS") != 0) {
        req->url.userInfo(SBuf(p->login)); // XXX: performance regression make peer login SBuf as well.
    }
    /* create fetch state structure */
    DigestFetchState *fetch = new DigestFetchState(pd, req);

    /* update timestamps */
    pd->times.requested = squid_curtime;
    pd_last_req_time = squid_curtime;
    req->flags.cachable = true;

    /* the rest is based on clientProcessExpired() */
    req->flags.refresh = true;

    /* keep a locked reference to any previously cached digest for IMS reuse */
    old_e = fetch->old_entry = Store::Root().get(key);

    if (old_e) {
        debugs(72, 5, "peerDigestRequest: found old entry");

        old_e->lock("peerDigestRequest");
        old_e->createMemObject(url, url, req->method);

        fetch->old_sc = storeClientListAdd(old_e, fetch);
    }

    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    fetch->sc = storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */

    if (old_e)
        e->lastModified(old_e->lastModified());

    /* push towards peer cache */
    debugs(72, 3, "peerDigestRequest: forwarding to fwdStart...");

    FwdState::fwdStart(Comm::ConnectionPointer(), e, req);

    /* request the first SM_PAGE_SIZE chunk into the fetch buffer */
    tempBuffer.offset = 0;

    tempBuffer.length = SM_PAGE_SIZE;

    tempBuffer.data = fetch->buf;

    storeClientCopy(fetch->sc, e, tempBuffer,
                    peerDigestHandleReply, fetch);

    safe_free(url);
}
391
392 /* Handle the data copying .. */
393
394 /*
395 * This routine handles the copy data and then redirects the
396 * copy to a bunch of subfunctions depending upon the copy state.
397 * It also tracks the buffer offset and "seen", since I'm actually
398 * not interested in rewriting everything to suit my little idea.
399 */
400 static void
401 peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
402 {
403 DigestFetchState *fetch = (DigestFetchState *)data;
404 int retsize = -1;
405 digest_read_state_t prevstate;
406 int newsize;
407
408 assert(fetch->pd && receivedData.data);
409 /* The existing code assumes that the received pointer is
410 * where we asked the data to be put
411 */
412 assert(fetch->buf + fetch->bufofs == receivedData.data);
413
414 /* Update the buffer size */
415 fetch->bufofs += receivedData.length;
416
417 assert(fetch->bufofs <= SM_PAGE_SIZE);
418
419 /* If we've fetched enough, return */
420
421 if (peerDigestFetchedEnough(fetch, fetch->buf, fetch->bufofs, "peerDigestHandleReply"))
422 return;
423
424 /* Call the right function based on the state */
425 /* (Those functions will update the state if needed) */
426
427 /* Give us a temporary reference. Some of the calls we make may
428 * try to destroy the fetch structure, and we like to know if they
429 * do
430 */
431 CbcPointer<DigestFetchState> tmpLock = fetch;
432
433 /* Repeat this loop until we're out of data OR the state changes */
434 /* (So keep going if the state has changed and we still have data */
435 do {
436 prevstate = fetch->state;
437
438 switch (fetch->state) {
439
440 case DIGEST_READ_REPLY:
441 retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs);
442 break;
443
444 case DIGEST_READ_HEADERS:
445 retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs);
446 break;
447
448 case DIGEST_READ_CBLOCK:
449 retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs);
450 break;
451
452 case DIGEST_READ_MASK:
453 retsize = peerDigestSwapInMask(fetch, fetch->buf, fetch->bufofs);
454 break;
455
456 case DIGEST_READ_NONE:
457 break;
458
459 case DIGEST_READ_DONE:
460 return;
461 break;
462
463 default:
464 fatal("Bad digest transfer mode!\n");
465 }
466
467 if (retsize < 0)
468 return;
469
470 /*
471 * The returned size indicates how much of the buffer was read -
472 * so move the remainder of the buffer to the beginning
473 * and update the bufofs / bufsize
474 */
475 newsize = fetch->bufofs - retsize;
476
477 memmove(fetch->buf, fetch->buf + retsize, fetch->bufofs - newsize);
478
479 fetch->bufofs = newsize;
480
481 } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);
482
483 /* Update the copy offset */
484 fetch->offset += receivedData.length;
485
486 /* Schedule another copy */
487 if (cbdataReferenceValid(fetch)) {
488 StoreIOBuffer tempBuffer;
489 tempBuffer.offset = fetch->offset;
490 tempBuffer.length = SM_PAGE_SIZE - fetch->bufofs;
491 tempBuffer.data = fetch->buf + fetch->bufofs;
492 storeClientCopy(fetch->sc, fetch->entry, tempBuffer,
493 peerDigestHandleReply, fetch);
494 }
495 }
496
/* wait for full http headers to be received then parse them */
/*
 * This routine handles parsing the reply line.
 * If the reply line indicates an OK, the same data is thrown
 * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
 * we simply stop parsing.
 */
static int
peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd = fetch->pd;
    size_t hdr_size;
    assert(pd && buf);
    assert(!fetch->offset);

    assert(fetch->state == DIGEST_READ_REPLY);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
        return -1;

    /* wait until the complete header block is in the buffer */
    if ((hdr_size = headersEnd(buf, size))) {
        HttpReply const *reply = fetch->entry->getReply();
        assert(reply);
        assert(reply->sline.status() != Http::scNone);
        const Http::StatusCode status = reply->sline.status();
        debugs(72, 3, "peerDigestFetchReply: " << pd->host << " status: " << status <<
               ", expires: " << (long int) reply->expires << " (" << std::showpos <<
               (int) (reply->expires - squid_curtime) << ")");

        /* this "if" is based on clientHandleIMSReply() */

        if (status == Http::scNotModified) {
            /* our old entry is fine */
            assert(fetch->old_entry);

            if (!fetch->old_entry->mem_obj->request) {
                fetch->old_entry->mem_obj->request = fetch->entry->mem_obj->request;
                HTTPMSGLOCK(fetch->old_entry->mem_obj->request);
            }

            assert(fetch->old_entry->mem_obj->request);

            Store::Root().updateOnNotModified(fetch->old_entry, *fetch->entry);

            /* get rid of 304 reply: switch the fetch over to the old entry */
            storeUnregister(fetch->sc, fetch->entry, fetch);

            fetch->entry->unlock("peerDigestFetchReply 304");

            fetch->entry = fetch->old_entry;

            fetch->old_entry = NULL;

            /* preserve request -- we need its size to update counters */
            /* requestUnlink(r); */
            /* fetch->entry->mem_obj->request = NULL; */
        } else if (status == Http::scOkay) {
            /* get rid of old entry if any */

            if (fetch->old_entry) {
                debugs(72, 3, "peerDigestFetchReply: got new digest, releasing old one");
                storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
                fetch->old_entry->releaseRequest();
                fetch->old_entry->unlock("peerDigestFetchReply 200");
                fetch->old_entry = NULL;
            }
        } else {
            /* some kind of a bug */
            peerDigestFetchAbort(fetch, buf, reply->sline.reason());
            return -1;      /* XXX -1 will abort stuff in ReadReply! */
        }

        /* must have a ready-to-use store entry if we got here */
        /* can we stay with the old in-memory digest? */
        if (status == Http::scNotModified && fetch->pd->cd) {
            peerDigestFetchStop(fetch, buf, "Not modified");
            fetch->state = DIGEST_READ_DONE;
        } else {
            fetch->state = DIGEST_READ_HEADERS;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "reply header too big");
    }

    /* We don't want to actually ack that we've handled anything,
     * otherwise SwapInHeaders() won't get the reply line .. */
    return 0;
}
589
/* fetch headers from disk, pass on to SwapInCBlock */
int
peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    size_t hdr_size;

    assert(fetch->state == DIGEST_READ_HEADERS);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
        return -1;

    assert(!fetch->offset);

    /* wait for the complete stored header block */
    if ((hdr_size = headersEnd(buf, size))) {
        assert(fetch->entry->getReply());
        assert(fetch->entry->getReply()->sline.status() != Http::scNone);

        /* anything but 200 cached here indicates an internal error upstream */
        if (fetch->entry->getReply()->sline.status() != Http::scOkay) {
            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
                   " status " << fetch->entry->getReply()->sline.status() <<
                   " got cached!");

            peerDigestFetchAbort(fetch, buf, "internal status error");
            return -1;
        }

        fetch->state = DIGEST_READ_CBLOCK;
        return hdr_size;    /* Say how much data we read */
    }

    /* need more data, do we have space? */
    if (size >= SM_PAGE_SIZE) {
        peerDigestFetchAbort(fetch, buf, "stored header too big");
        return -1;
    }

    return 0;       /* We need to read more to parse .. */
}
629
/* parse the fixed-size digest control block (cblock); on success switch to
 * reading the digest mask bytes */
int
peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;

    assert(fetch->state == DIGEST_READ_CBLOCK);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock"))
        return -1;

    /* wait until the whole cblock is buffered */
    if (size >= (ssize_t)StoreDigestCBlockSize) {
        PeerDigest *pd = fetch->pd;

        assert(pd && fetch->entry->getReply());

        if (peerDigestSetCBlock(pd, buf)) {
            /* XXX: soon we will have variable header size */
            /* switch to CD buffer and fetch digest guts */
            buf = NULL;
            assert(pd->cd->mask);
            fetch->state = DIGEST_READ_MASK;
            return StoreDigestCBlockSize;
        } else {
            peerDigestFetchAbort(fetch, buf, "invalid digest cblock");
            return -1;
        }
    }

    /* need more data, do we have space? */
    if (size >= SM_PAGE_SIZE) {
        peerDigestFetchAbort(fetch, buf, "digest cblock too big");
        return -1;
    }

    return 0;       /* We need more data */
}
666
667 int
668 peerDigestSwapInMask(void *data, char *buf, ssize_t size)
669 {
670 DigestFetchState *fetch = (DigestFetchState *)data;
671 PeerDigest *pd;
672
673 pd = fetch->pd;
674 assert(pd->cd && pd->cd->mask);
675
676 /*
677 * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore!
678 * we need to do the copy ourselves!
679 */
680 memcpy(pd->cd->mask + fetch->mask_offset, buf, size);
681
682 /* NOTE! buf points to the middle of pd->cd->mask! */
683
684 if (peerDigestFetchedEnough(fetch, NULL, size, "peerDigestSwapInMask"))
685 return -1;
686
687 fetch->mask_offset += size;
688
689 if (fetch->mask_offset >= pd->cd->mask_size) {
690 debugs(72, 2, "peerDigestSwapInMask: Done! Got " <<
691 fetch->mask_offset << ", expected " << pd->cd->mask_size);
692 assert(fetch->mask_offset == pd->cd->mask_size);
693 assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask"));
694 return -1; /* XXX! */
695 }
696
697 /* We always read everything, so return size */
698 return size;
699 }
700
/* Decide whether the fetch is complete (successfully or not). Returns
 * non-zero and finishes the request when a completion reason was found;
 * returns 0 when fetching should continue. Called at the start of every
 * parsing step and on eof. */
static int
peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name)
{
    PeerDigest *pd = NULL;
    const char *host = "<unknown>";   /* peer host */
    const char *reason = NULL;  /* reason for completion */
    const char *no_bug = NULL;  /* successful completion if set */
    const int pdcb_valid = cbdataReferenceValid(fetch->pd);
    /* NOTE(review): fetch->pd is dereferenced here before the NULL check
     * below — presumably it can never be NULL in practice; confirm */
    const int pcb_valid = cbdataReferenceValid(fetch->pd->peer);

    /* test possible exiting conditions (the same for most steps!)
     * cases marked with '?!' should not happen */

    if (!reason) {
        if (!(pd = fetch->pd))
            reason = "peer digest disappeared?!";

#if DONT            /* WHY NOT? /HNO */

        else if (!cbdataReferenceValid(pd))
            reason = "invalidated peer digest?!";

#endif

        else
            host = pd->host.termedBuf();
    }

    debugs(72, 6, step_name << ": peer " << host << ", offset: " <<
           fetch->offset << " size: " << size << ".");

    /* continue checking (with pd and host known and valid) */

    if (!reason) {
        if (!cbdataReferenceValid(pd->peer))
            reason = "peer disappeared";
        else if (size < 0)
            reason = "swap failure";
        else if (!fetch->entry)
            reason = "swap aborted?!";
        else if (EBIT_TEST(fetch->entry->flags, ENTRY_ABORTED))
            reason = "swap aborted";
    }

    /* continue checking (maybe-successful eof case: size == 0 means eof) */
    if (!reason && !size) {
        if (!pd->cd)
            reason = "null digest?!";
        else if (fetch->mask_offset != pd->cd->mask_size)
            reason = "premature end of digest?!";
        else if (!peerDigestUseful(pd))
            reason = "useless digest";
        else
            reason = no_bug = "success";
    }

    /* finish if we have a reason */
    if (reason) {
        /* '?!' reasons indicate internal inconsistencies and get a louder level */
        const int level = strstr(reason, "?!") ? 1 : 3;
        debugs(72, level, "" << step_name << ": peer " << host << ", exiting after '" << reason << "'");
        peerDigestReqFinish(fetch, buf,
                            1, pdcb_valid, pcb_valid, reason, !no_bug);
    } else {
        /* paranoid check */
        assert(pdcb_valid && pcb_valid);
    }

    return reason != NULL;
}
770
/* call this when all callback data is valid and fetch must be stopped but
 * no error has occurred (e.g. we received 304 reply and reuse old digest) */
static void
peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchStop: peer " << fetch->pd->host << ", reason: " << reason);
    /* all cbdata valid (1,1,1), err = 0: success path */
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 0);
}
780
/* call this when all callback data is valid but something bad happened */
static void
peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchAbort: peer " << fetch->pd->host << ", reason: " << reason);
    /* all cbdata valid (1,1,1), err = 1: failure path */
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1);
}
789
/* complete the digest transfer, update stats, unlock/release everything;
 * the *_valid flags say which cbdata objects (fetch, pd, peer) are still
 * alive and may be touched */
static void
peerDigestReqFinish(DigestFetchState * fetch, char *buf,
                    int fcb_valid, int pdcb_valid, int pcb_valid,
                    const char *reason, int err)
{
    assert(reason);

    /* must go before peerDigestPDFinish */

    if (pdcb_valid) {
        fetch->pd->flags.requested = false;
        fetch->pd->req_result = reason;
    }

    /* schedule next check if peer is still out there */
    if (pcb_valid) {
        PeerDigest *pd = fetch->pd;

        if (err) {
            /* failed: back off exponentially before retrying */
            pd->times.retry_delay = peerDigestIncDelay(pd);
            peerDigestSetCheck(pd, pd->times.retry_delay);
        } else {
            /* succeeded: reset backoff, re-check when the digest expires */
            pd->times.retry_delay = 0;
            peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry));
        }
    }

    /* note: order is significant */
    if (fcb_valid)
        peerDigestFetchSetStats(fetch);

    if (pdcb_valid)
        peerDigestPDFinish(fetch, pcb_valid, err);

    if (fcb_valid)
        peerDigestFetchFinish(fetch, err);
}
828
/* destroys digest if peer disappeared
 * must be called only when fetch and pd cbdata are valid */
static void
peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err)
{
    PeerDigest *pd = fetch->pd;
    const char *host = pd->host.termedBuf();

    /* roll this fetch's timing and traffic into the per-peer digest stats */
    pd->times.received = squid_curtime;
    pd->times.req_delay = fetch->resp_time;
    pd->stats.sent.kbytes += fetch->sent.bytes;
    pd->stats.recv.kbytes += fetch->recv.bytes;
    pd->stats.sent.msgs += fetch->sent.msg;
    pd->stats.recv.msgs += fetch->recv.msg;

    if (err) {
        debugs(72, DBG_IMPORTANT, "" << (pcb_valid ? "temporary " : "" ) << "disabling (" << pd->req_result << ") digest from " << host);

        /* drop the (possibly partial) in-memory digest */
        delete pd->cd;
        pd->cd = nullptr;

        pd->flags.usable = false;

        if (!pcb_valid)
            peerDigestNotePeerGone(pd);
    } else {
        assert(pcb_valid);

        pd->flags.usable = true;

        /* XXX: ugly condition, but how? */

        if (fetch->entry->store_status == STORE_OK)
            debugs(72, 2, "re-used old digest from " << host);
        else
            debugs(72, 2, "received valid digest from " << host);
    }

    /* release the pd lock taken in the DigestFetchState constructor;
     * the fetch destructor asserts this happened */
    cbdataReferenceDone(fetch->pd);
}
869
/* free fetch state structures
 * must be called only when fetch cbdata is valid */
static void
peerDigestFetchFinish(DigestFetchState * fetch, int err)
{
    assert(fetch->entry && fetch->request);

    /* drop any leftover reference to the previous digest's store entry */
    if (fetch->old_entry) {
        debugs(72, 3, "peerDigestFetchFinish: deleting old entry");
        storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
        fetch->old_entry->releaseRequest();
        fetch->old_entry->unlock("peerDigestFetchFinish old");
        fetch->old_entry = NULL;
    }

    /* update global stats */
    statCounter.cd.kbytes_sent += fetch->sent.bytes;
    statCounter.cd.kbytes_recv += fetch->recv.bytes;
    statCounter.cd.msgs_sent += fetch->sent.msg;
    statCounter.cd.msgs_recv += fetch->recv.msg;

    /* the destructor unregisters the store client and unlocks the entry */
    delete fetch;
}
893
/* calculate fetch stats after completion */
static void
peerDigestFetchSetStats(DigestFetchState * fetch)
{
    MemObject *mem;
    assert(fetch->entry && fetch->request);

    mem = fetch->entry->mem_obj;
    assert(mem);

    /* XXX: outgoing numbers are not precise */
    /* XXX: we must distinguish between 304 hits and misses here */
    fetch->sent.bytes = fetch->request->prefixLen();
    /* XXX: this is slightly wrong: we don't KNOW that the entire memobject
     * was fetched. We only know how big it is
     */
    fetch->recv.bytes = mem->size();
    fetch->sent.msg = fetch->recv.msg = 1;
    fetch->expires = fetch->entry->expires;
    fetch->resp_time = squid_curtime - fetch->start_time;

    debugs(72, 3, "peerDigestFetchFinish: recv " << fetch->recv.bytes <<
           " bytes in " << (int) fetch->resp_time << " secs");

    debugs(72, 3, "peerDigestFetchFinish: expires: " <<
           (long int) fetch->expires << " (" << std::showpos <<
           (int) (fetch->expires - squid_curtime) << "), lmt: " <<
           std::noshowpos << (long int) fetch->entry->lastModified() << " (" <<
           std::showpos << (int) (fetch->entry->lastModified() - squid_curtime) <<
           ")");

}
926
/* Parse and validate a received digest control block. On success, allocates
 * (or reuses) pd->cd sized per the cblock and returns 1; returns 0 on any
 * validation failure. */
static int
peerDigestSetCBlock(PeerDigest * pd, const char *buf)
{
    StoreDigestCBlock cblock;
    int freed_size = 0;
    const char *host = pd->host.termedBuf();

    /* copy out of the wire buffer to get proper alignment */
    memcpy(&cblock, buf, sizeof(cblock));
    /* network -> host conversions */
    cblock.ver.current = ntohs(cblock.ver.current);
    cblock.ver.required = ntohs(cblock.ver.required);
    cblock.capacity = ntohl(cblock.capacity);
    cblock.count = ntohl(cblock.count);
    cblock.del_count = ntohl(cblock.del_count);
    cblock.mask_size = ntohl(cblock.mask_size);
    debugs(72, 2, "got digest cblock from " << host << "; ver: " <<
           (int) cblock.ver.current << " (req: " << (int) cblock.ver.required <<
           ")");

    debugs(72, 2, "\t size: " <<
           cblock.mask_size << " bytes, e-cnt: " <<
           cblock.count << ", e-util: " <<
           xpercentInt(cblock.count, cblock.capacity) << "%" );
    /* check version requirements (both ways) */

    if (cblock.ver.required > CacheDigestVer.current) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest requires version " <<
               cblock.ver.required << "; have: " << CacheDigestVer.current);

        return 0;
    }

    if (cblock.ver.current < CacheDigestVer.required) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest is version " <<
               cblock.ver.current << "; we require: " <<
               CacheDigestVer.required);

        return 0;
    }

    /* check consistency */
    if (cblock.ver.required > cblock.ver.current ||
            cblock.mask_size <= 0 || cblock.capacity <= 0 ||
            cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) {
        debugs(72, DBG_CRITICAL, "" << host << " digest cblock is corrupted.");
        return 0;
    }

    /* check consistency further: advertised mask size must match what the
     * capacity and bits-per-entry imply */
    if ((size_t)cblock.mask_size != CacheDigest::CalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
        debugs(72, DBG_CRITICAL, host << " digest cblock is corrupted " <<
               "(mask size mismatch: " << cblock.mask_size << " ? " <<
               CacheDigest::CalcMaskSize(cblock.capacity, cblock.bits_per_entry)
               << ").");
        return 0;
    }

    /* there are some things we cannot do yet */
    if (cblock.hash_func_count != CacheDigestHashFuncCount) {
        debugs(72, DBG_CRITICAL, "" << host << " digest: unsupported #hash functions: " <<
               cblock.hash_func_count << " ? " << CacheDigestHashFuncCount << ".");
        return 0;
    }

    /*
     * no cblock bugs below this point
     */
    /* check size changes: discard the old digest if the mask size differs */
    if (pd->cd && cblock.mask_size != (ssize_t)pd->cd->mask_size) {
        debugs(72, 2, host << " digest changed size: " << cblock.mask_size <<
               " -> " << pd->cd->mask_size);
        freed_size = pd->cd->mask_size;
        delete pd->cd;
        pd->cd = nullptr;
    }

    if (!pd->cd) {
        debugs(72, 2, "creating " << host << " digest; size: " << cblock.mask_size << " (" <<
               std::showpos << (int) (cblock.mask_size - freed_size) << ") bytes");
        pd->cd = new CacheDigest(cblock.capacity, cblock.bits_per_entry);

        if (cblock.mask_size >= freed_size)
            statCounter.cd.memory += (cblock.mask_size - freed_size);
    }

    assert(pd->cd);
    /* these assignments leave us in an inconsistent state until we finish reading the digest */
    pd->cd->count = cblock.count;
    pd->cd->del_count = cblock.del_count;
    return 1;
}
1018
1019 static int
1020 peerDigestUseful(const PeerDigest * pd)
1021 {
1022 /* TODO: we should calculate the prob of a false hit instead of bit util */
1023 const auto bit_util = pd->cd->usedMaskPercent();
1024
1025 if (bit_util > 65.0) {
1026 debugs(72, DBG_CRITICAL, "Warning: " << pd->host <<
1027 " peer digest has too many bits on (" << bit_util << "%).");
1028 return 0;
1029 }
1030
1031 return 1;
1032 }
1033
1034 static int
1035 saneDiff(time_t diff)
1036 {
1037 return abs((int) diff) > squid_curtime / 2 ? 0 : diff;
1038 }
1039
/* dump one peer digest's timing, state, traffic, and structure stats into a
 * cachemgr report entry */
void
peerDigestStatsReport(const PeerDigest * pd, StoreEntry * e)
{
/* yes/no rendering of a pd->flags member */
#define f2s(flag) (pd->flags.flag ? "yes" : "no")
/* one report row per pd->times member: absolute, delta-from-now, delta-from-init */
#define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
    ""#tm, (long int)pd->times.tm, \
    saneDiff(pd->times.tm - squid_curtime), \
    saneDiff(pd->times.tm - pd->times.initialized))

    assert(pd);

    const char *host = pd->host.termedBuf();
    storeAppendPrintf(e, "\npeer digest from %s\n", host);

    cacheDigestGuessStatsReport(&pd->stats.guess, e, host);

    storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n");
    appendTime(initialized);
    appendTime(needed);
    appendTime(requested);
    appendTime(received);
    appendTime(next_check);

    storeAppendPrintf(e, "peer digest state:\n");
    storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n",
                      f2s(needed), f2s(usable), f2s(requested));
    storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n",
                      (int) pd->times.retry_delay);
    storeAppendPrintf(e, "\tlast request response time: %d secs\n",
                      (int) pd->times.req_delay);
    storeAppendPrintf(e, "\tlast request result: %s\n",
                      pd->req_result ? pd->req_result : "(none)");

    storeAppendPrintf(e, "\npeer digest traffic:\n");
    storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n",
                      pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb);
    storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n",
                      pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb);

    storeAppendPrintf(e, "\npeer digest structure:\n");

    if (pd->cd)
        cacheDigestReport(pd->cd, host, e);
    else
        storeAppendPrintf(e, "\tno in-memory copy\n");
}
1086
1087 #endif
1088