]> git.ipfire.org Git - thirdparty/squid.git/blob - src/peer_digest.cc
Source Format Enforcement (#532)
[thirdparty/squid.git] / src / peer_digest.cc
1 /*
2 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 72 Peer Digest Routines */
10
11 #include "squid.h"
12 #if USE_CACHE_DIGESTS
13 #include "CacheDigest.h"
14 #include "CachePeer.h"
15 #include "event.h"
16 #include "FwdState.h"
17 #include "globals.h"
18 #include "HttpReply.h"
19 #include "HttpRequest.h"
20 #include "internal.h"
21 #include "MemObject.h"
22 #include "mime_header.h"
23 #include "neighbors.h"
24 #include "PeerDigest.h"
25 #include "SquidTime.h"
26 #include "Store.h"
27 #include "store_key_md5.h"
28 #include "StoreClient.h"
29 #include "tools.h"
30 #include "util.h"
31
/* local types */

/* local prototypes */

/* retry/expiration delay computation for (re)scheduling digest checks */
static time_t peerDigestIncDelay(const PeerDigest * pd);
static time_t peerDigestNewDelay(const StoreEntry * e);

/* scheduling a verification event and issuing the actual fetch request */
static void peerDigestSetCheck(PeerDigest * pd, time_t delay);
static EVH peerDigestCheck;
static void peerDigestRequest(PeerDigest * pd);

/* reply-parsing state machine: HandleReply dispatches to the step matching
 * fetch->state; each step returns the number of buffer bytes it consumed,
 * or -1 to stop the fetch */
static STCB peerDigestHandleReply;
static int peerDigestFetchReply(void *, char *, ssize_t);
int peerDigestSwapInHeaders(void *, char *, ssize_t);
int peerDigestSwapInCBlock(void *, char *, ssize_t);
int peerDigestSwapInMask(void *, char *, ssize_t);

/* fetch termination paths and bookkeeping */
static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err);
static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err);
static void peerDigestFetchFinish(DigestFetchState * fetch, int err);
static void peerDigestFetchSetStats(DigestFetchState * fetch);
static int peerDigestSetCBlock(PeerDigest * pd, const char *buf);
static int peerDigestUseful(const PeerDigest * pd);

/* local constants */

/* cache digest version we emit/accept; checked against the peer cblock
 * in peerDigestSetCBlock() */
Version const CacheDigestVer = { 5, 3 };

#define StoreDigestCBlockSize sizeof(StoreDigestCBlock)

/* min interval for requesting digests from a given peer */
static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */
/* min interval for requesting digests (cumulative request stream) */
static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */

/* local vars */

static time_t pd_last_req_time = 0; /* last call to Check */
/// Binds this digest to its CachePeer: takes a cbdata lock on the peer
/// (released in peerDigestDestroy()) and copies the peer's host name so
/// the peer can still be identified in logs after it disappears.
PeerDigest::PeerDigest(CachePeer * p)
{
    assert(p);

    /*
     * DPW 2007-04-12
     * Lock on to the peer here. The corresponding cbdataReferenceDone()
     * is in peerDigestDestroy().
     */
    peer = cbdataReference(p);
    /* if peer disappears, we will know it's name */
    host = p->host;

    times.initialized = squid_curtime;
}
84
/* register both classes with the cbdata lifetime-tracking machinery */
CBDATA_CLASS_INIT(PeerDigest);

CBDATA_CLASS_INIT(DigestFetchState);
88
/// Prepares per-fetch state for downloading a peer digest: takes a cbdata
/// lock on the owning PeerDigest and an HTTPMSG lock on the request, and
/// starts the state machine in DIGEST_READ_REPLY.
DigestFetchState::DigestFetchState(PeerDigest *aPd, HttpRequest *req) :
    pd(cbdataReference(aPd)),
    entry(NULL),
    old_entry(NULL),
    sc(NULL),
    old_sc(NULL),
    request(req),
    offset(0),
    mask_offset(0),
    start_time(squid_curtime),
    resp_time(0),
    expires(0),
    bufofs(0),
    state(DIGEST_READ_REPLY)
{
    HTTPMSGLOCK(request);

    /* traffic counters start at zero; filled in by peerDigestFetchSetStats() */
    sent.msg = 0;
    sent.bytes = 0;

    recv.msg = 0;
    recv.bytes = 0;

    /* parse buffer starts empty */
    *buf = 0;
}
114
/// Releases fetch resources: store client registration, entry lock, and
/// the request. Expects the pd reference to have been released already
/// (peerDigestPDFinish() calls cbdataReferenceDone(fetch->pd) first).
DigestFetchState::~DigestFetchState()
{
    /* unlock everything */
    storeUnregister(sc, entry, this);

    entry->unlock("DigestFetchState destructed");
    entry = NULL;

    HTTPMSGUNLOCK(request);

    /* pd must have been released before destruction -- see peerDigestPDFinish() */
    assert(pd == NULL);
}
127
/* allocate new peer digest, call Init, and lock everything */
void
peerDigestCreate(CachePeer * p)
{
    assert(p);

    PeerDigest *pd = new PeerDigest(p);

    // TODO: make CachePeer member a CbcPointer
    /* the peer holds one cbdata reference to its digest ... */
    p->digest = cbdataReference(pd);

    // lock a reference to pd again to prevent the PeerDigest
    // disappearing during peerDigestDestroy() when
    // cbdataReferenceValidDone is called.
    // TODO test if it can be moved into peerDigestDestroy() or
    // if things can break earlier (eg CachePeer death).
    /* ... and a second, anonymous reference keeps pd alive through teardown */
    (void)cbdataReference(pd);
}
146
/* call Clean and free/unlock everything */
static void
peerDigestDestroy(PeerDigest * pd)
{
    void *p;  /* receives the raw CachePeer pointer from the cbdata unlock */
    assert(pd);
    void * peerTmp = pd->peer;

    /*
     * DPW 2007-04-12
     * We locked the peer in PeerDigest constructor, this is
     * where we unlock it.
     */
    if (cbdataReferenceValidDone(peerTmp, &p)) {
        // we locked the p->digest in peerDigestCreate()
        // this is where we unlock that
        cbdataReferenceDone(static_cast<CachePeer *>(p)->digest);
    }

    delete pd;
}
168
/// Frees the in-memory cache digest, if any.
PeerDigest::~PeerDigest()
{
    delete cd;
    // req_result pointer is not owned by us
}
174
/* called by peer to indicate that somebody actually needs this digest */
void
peerDigestNeeded(PeerDigest * pd)
{
    assert(pd);
    assert(!pd->flags.needed); /* must be called at most once per digest */
    assert(!pd->cd);

    pd->flags.needed = true;
    pd->times.needed = squid_curtime;
    peerDigestSetCheck(pd, 0); /* check asap */
}
187
188 /* currently we do not have a reason to disable without destroying */
189 #if FUTURE_CODE
190 /* disables peer for good */
191 static void
192 peerDigestDisable(PeerDigest * pd)
193 {
194 debugs(72, 2, "peerDigestDisable: peer " << pd->host.buf() << " disabled for good");
195 pd->times.disabled = squid_curtime;
196 pd->times.next_check = -1; /* never */
197 pd->flags.usable = 0;
198
199 delete pd->cd
200 pd->cd = nullptr;
201
202 /* we do not destroy the pd itself to preserve its "history" and stats */
203 }
204
205 #endif
206
207 /* increment retry delay [after an unsuccessful attempt] */
208 static time_t
209 peerDigestIncDelay(const PeerDigest * pd)
210 {
211 assert(pd);
212 return pd->times.retry_delay > 0 ?
213 2 * pd->times.retry_delay : /* exponential backoff */
214 PeerDigestReqMinGap; /* minimal delay */
215 }
216
217 /* artificially increases Expires: setting to avoid race conditions
218 * returns the delay till that [increased] expiration time */
219 static time_t
220 peerDigestNewDelay(const StoreEntry * e)
221 {
222 assert(e);
223
224 if (e->expires > 0)
225 return e->expires + PeerDigestReqMinGap - squid_curtime;
226
227 return PeerDigestReqMinGap;
228 }
229
/* registers next digest verification */
static void
peerDigestSetCheck(PeerDigest * pd, time_t delay)
{
    /* final argument 1: eventAdd treats pd as cbdata, so a freed pd cancels the event */
    eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
    pd->times.next_check = squid_curtime + delay;
    debugs(72, 3, "peerDigestSetCheck: will check peer " << pd->host << " in " << delay << " secs");
}
238
/*
 * called when peer is about to disappear or have already disappeared
 */
void
peerDigestNotePeerGone(PeerDigest * pd)
{
    if (pd->flags.requested) {
        /* a fetch is in flight; it holds a pd reference and will
         * detect the dead peer via peerDigestFetchedEnough() */
        debugs(72, 2, "peerDigest: peer " << pd->host << " gone, will destroy after fetch.");
        /* do nothing now, the fetching chain will notice and take action */
    } else {
        debugs(72, 2, "peerDigest: peer " << pd->host << " is gone, destroying now.");
        peerDigestDestroy(pd);
    }
}
253
/* callback for eventAdd() (with peer digest locked)
 * request new digest if our copy is too old or if we lack one;
 * schedule next check otherwise */
static void
peerDigestCheck(void *data)
{
    PeerDigest *pd = (PeerDigest *)data;
    time_t req_time;

    assert(!pd->flags.requested);

    pd->times.next_check = 0; /* unknown */

    /* peer may have been removed since this event was scheduled */
    if (!cbdataReferenceValid(pd->peer)) {
        peerDigestNotePeerGone(pd);
        return;
    }

    debugs(72, 3, "peerDigestCheck: peer " << pd->peer->host << ":" << pd->peer->http_port);
    debugs(72, 3, "peerDigestCheck: time: " << squid_curtime <<
           ", last received: " << (long int) pd->times.received << " (" <<
           std::showpos << (int) (squid_curtime - pd->times.received) << ")");

    /* decide when we should send the request:
     * request now unless too close to other requests */
    req_time = squid_curtime;

    /* per-peer limit */

    if (req_time - pd->times.received < PeerDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close peer requests (" <<
               (int) (req_time - pd->times.received) << " < " <<
               (int) PeerDigestReqMinGap << " secs).");

        req_time = pd->times.received + PeerDigestReqMinGap;
    }

    /* global limit (rate-limits the cumulative digest request stream) */
    if (req_time - pd_last_req_time < GlobDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close requests (" <<
               (int) (req_time - pd_last_req_time) << " < " <<
               (int) GlobDigestReqMinGap << " secs).");

        req_time = pd_last_req_time + GlobDigestReqMinGap;
    }

    /* fetch now, or re-schedule this check for the allowed time */
    if (req_time <= squid_curtime)
        peerDigestRequest(pd); /* will set pd->flags.requested */
    else
        peerDigestSetCheck(pd, req_time - squid_curtime);
}
307
/* ask store for a digest */
/*
 * Builds the digest request (peer-configured URL or the internal
 * /squid-internal-periodic/ URI), creates the DigestFetchState, wires up
 * store clients for both the new entry and any cached old entry (for IMS
 * revalidation), and kicks off forwarding plus the first storeClientCopy.
 */
static void
peerDigestRequest(PeerDigest * pd)
{
    CachePeer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url = NULL;
    HttpRequest *req;
    StoreIOBuffer tempBuffer;

    pd->req_result = NULL;
    pd->flags.requested = true;

    /* compute future request components */

    if (p->digest_url)
        url = xstrdup(p->digest_url);
    else
        url = xstrdup(internalRemoteUri(p->secure.encryptTransport, p->host, p->http_port, "/squid-internal-periodic/", SBuf(StoreDigestFileName)));
    debugs(72, 2, url);

    const MasterXaction::Pointer mx = new MasterXaction(XactionInitiator::initCacheDigest);
    req = HttpRequest::FromUrlXXX(url, mx);

    assert(req);

    /* add custom headers */
    assert(!req->header.len);

    req->header.putStr(Http::HdrType::ACCEPT, StoreDigestMimeStr);

    req->header.putStr(Http::HdrType::ACCEPT, "text/html");

    /* forward proxy credentials unless the configured login mode is a
     * pass-through marker rather than a literal user:password */
    if (p->login &&
            p->login[0] != '*' &&
            strcmp(p->login, "PASS") != 0 &&
            strcmp(p->login, "PASSTHRU") != 0 &&
            strncmp(p->login, "NEGOTIATE",9) != 0 &&
            strcmp(p->login, "PROXYPASS") != 0) {
        req->url.userInfo(SBuf(p->login)); // XXX: performance regression make peer login SBuf as well.
    }
    /* create fetch state structure */
    DigestFetchState *fetch = new DigestFetchState(pd, req);

    /* update timestamps */
    pd->times.requested = squid_curtime;
    pd_last_req_time = squid_curtime;
    req->flags.cachable = true;

    /* the rest is based on clientReplyContext::processExpired() */
    req->flags.refresh = true;

    old_e = fetch->old_entry = storeGetPublicByRequest(req);

    // XXX: Missing a hittingRequiresCollapsing() && startCollapsingOn() check.
    if (old_e) {
        debugs(72, 5, "found old " << *old_e);

        /* keep the old entry locked until we know whether a 304 lets us reuse it */
        old_e->lock("peerDigestRequest");
        old_e->ensureMemObject(url, url, req->method);

        fetch->old_sc = storeClientListAdd(old_e, fetch);
    }

    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    debugs(72, 5, "created " << *e);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    fetch->sc = storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */

    if (old_e)
        e->lastModified(old_e->lastModified());

    /* push towards peer cache */
    FwdState::fwdStart(Comm::ConnectionPointer(), e, req);

    /* request the first SM_PAGE_SIZE bytes into fetch->buf */
    tempBuffer.offset = 0;

    tempBuffer.length = SM_PAGE_SIZE;

    tempBuffer.data = fetch->buf;

    storeClientCopy(fetch->sc, e, tempBuffer,
                    peerDigestHandleReply, fetch);

    safe_free(url);
}
395
/* Handle the data copying .. */

/*
 * This routine handles the copy data and then redirects the
 * copy to a bunch of subfunctions depending upon the copy state.
 * It also tracks the buffer offset and "seen", since I'm actually
 * not interested in rewriting everything to suit my little idea.
 */
static void
peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    int retsize = -1;            /* bytes consumed by the current step, -1 aborts */
    digest_read_state_t prevstate;
    int newsize;

    assert(fetch->pd && receivedData.data);
    /* The existing code assumes that the received pointer is
     * where we asked the data to be put
     */
    assert(fetch->buf + fetch->bufofs == receivedData.data);

    /* Update the buffer size */
    fetch->bufofs += receivedData.length;

    assert(fetch->bufofs <= SM_PAGE_SIZE);

    /* If we've fetched enough, return */

    if (peerDigestFetchedEnough(fetch, fetch->buf, fetch->bufofs, "peerDigestHandleReply"))
        return;

    /* Call the right function based on the state */
    /* (Those functions will update the state if needed) */

    /* Give us a temporary reference. Some of the calls we make may
     * try to destroy the fetch structure, and we like to know if they
     * do
     */
    CbcPointer<DigestFetchState> tmpLock = fetch;

    /* Repeat this loop until we're out of data OR the state changes */
    /* (So keep going if the state has changed and we still have data */
    do {
        prevstate = fetch->state;

        switch (fetch->state) {

        case DIGEST_READ_REPLY:
            retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_HEADERS:
            retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_CBLOCK:
            retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_MASK:
            retsize = peerDigestSwapInMask(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_NONE:
            break;

        case DIGEST_READ_DONE:
            return;
            break;

        default:
            fatal("Bad digest transfer mode!\n");
        }

        /* a step returning -1 has already finished/aborted the fetch */
        if (retsize < 0)
            return;

        /*
         * The returned size indicates how much of the buffer was read -
         * so move the remainder of the buffer to the beginning
         * and update the bufofs / bufsize
         */
        newsize = fetch->bufofs - retsize;

        memmove(fetch->buf, fetch->buf + retsize, fetch->bufofs - newsize);

        fetch->bufofs = newsize;

    } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);

    /* Update the copy offset */
    fetch->offset += receivedData.length;

    /* Schedule another copy (only if a step did not destroy the fetch) */
    if (cbdataReferenceValid(fetch)) {
        StoreIOBuffer tempBuffer;
        tempBuffer.offset = fetch->offset;
        tempBuffer.length = SM_PAGE_SIZE - fetch->bufofs;
        tempBuffer.data = fetch->buf + fetch->bufofs;
        storeClientCopy(fetch->sc, fetch->entry, tempBuffer,
                        peerDigestHandleReply, fetch);
    }
}
500
/* wait for full http headers to be received then parse them */
/*
 * This routine handles parsing the reply line.
 * If the reply line indicates an OK, the same data is thrown
 * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
 * we simply stop parsing.
 */
static int
peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd = fetch->pd;
    size_t hdr_size;
    assert(pd && buf);
    assert(!fetch->offset);

    assert(fetch->state == DIGEST_READ_REPLY);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
        return -1;

    /* wait until the full header block is in the buffer */
    if ((hdr_size = headersEnd(buf, size))) {
        const auto &reply = fetch->entry->mem().freshestReply();
        const auto status = reply.sline.status();
        assert(status != Http::scNone);
        debugs(72, 3, "peerDigestFetchReply: " << pd->host << " status: " << status <<
               ", expires: " << (long int) reply.expires << " (" << std::showpos <<
               (int) (reply.expires - squid_curtime) << ")");

        /* this "if" is based on clientHandleIMSReply() */

        if (status == Http::scNotModified) {
            /* our old entry is fine */
            assert(fetch->old_entry);

            if (!fetch->old_entry->mem_obj->request)
                fetch->old_entry->mem_obj->request = fetch->entry->mem_obj->request;

            assert(fetch->old_entry->mem_obj->request);

            Store::Root().updateOnNotModified(fetch->old_entry, *fetch->entry);

            /* get rid of 304 reply */
            storeUnregister(fetch->sc, fetch->entry, fetch);

            fetch->entry->unlock("peerDigestFetchReply 304");

            /* continue the fetch against the (revalidated) old entry */
            fetch->entry = fetch->old_entry;

            fetch->old_entry = NULL;

            /* preserve request -- we need its size to update counters */
            /* requestUnlink(r); */
            /* fetch->entry->mem_obj->request = NULL; */
        } else if (status == Http::scOkay) {
            /* get rid of old entry if any */

            if (fetch->old_entry) {
                debugs(72, 3, "peerDigestFetchReply: got new digest, releasing old one");
                storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
                fetch->old_entry->releaseRequest();
                fetch->old_entry->unlock("peerDigestFetchReply 200");
                fetch->old_entry = NULL;
            }
        } else {
            /* some kind of a bug */
            peerDigestFetchAbort(fetch, buf, reply.sline.reason());
            return -1; /* XXX -1 will abort stuff in ReadReply! */
        }

        /* must have a ready-to-use store entry if we got here */
        /* can we stay with the old in-memory digest? */
        if (status == Http::scNotModified && fetch->pd->cd) {
            peerDigestFetchStop(fetch, buf, "Not modified");
            fetch->state = DIGEST_READ_DONE;
        } else {
            fetch->state = DIGEST_READ_HEADERS;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "reply header too big");
    }

    /* We don't want to actually ack that we've handled anything,
     * otherwise SwapInHeaders() won't get the reply line .. */
    return 0;
}
590
/* fetch headers from disk, pass on to SwapInCBlock */
/*
 * Re-reads the (now stored) HTTP headers and consumes them from the
 * buffer, advancing the state machine to DIGEST_READ_CBLOCK.
 * Returns the number of header bytes consumed, 0 for "need more data",
 * or -1 after aborting the fetch.
 */
int
peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    size_t hdr_size;

    assert(fetch->state == DIGEST_READ_HEADERS);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
        return -1;

    assert(!fetch->offset);

    if ((hdr_size = headersEnd(buf, size))) {
        const auto &reply = fetch->entry->mem().freshestReply();
        const auto status = reply.sline.status();
        assert(status != Http::scNone);

        /* only a 200 should have made it this far (see peerDigestFetchReply()) */
        if (status != Http::scOkay) {
            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
                   " status " << status << " got cached!");

            peerDigestFetchAbort(fetch, buf, "internal status error");
            return -1;
        }

        fetch->state = DIGEST_READ_CBLOCK;
        return hdr_size; /* Say how much data we read */
    }

    /* need more data, do we have space? */
    if (size >= SM_PAGE_SIZE) {
        peerDigestFetchAbort(fetch, buf, "stored header too big");
        return -1;
    }

    return 0; /* We need to read more to parse .. */
}
630
/*
 * Parses the fixed-size digest control block (StoreDigestCBlock) once it
 * is fully buffered, allocating/validating pd->cd via peerDigestSetCBlock()
 * and advancing to DIGEST_READ_MASK.
 * Returns bytes consumed, 0 for "need more data", or -1 after aborting.
 */
int
peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;

    assert(fetch->state == DIGEST_READ_CBLOCK);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock"))
        return -1;

    if (size >= (ssize_t)StoreDigestCBlockSize) {
        PeerDigest *pd = fetch->pd;

        assert(pd);
        assert(fetch->entry->mem_obj);

        if (peerDigestSetCBlock(pd, buf)) {
            /* XXX: soon we will have variable header size */
            /* switch to CD buffer and fetch digest guts */
            buf = NULL;
            assert(pd->cd->mask);
            fetch->state = DIGEST_READ_MASK;
            return StoreDigestCBlockSize;
        } else {
            peerDigestFetchAbort(fetch, buf, "invalid digest cblock");
            return -1;
        }
    }

    /* need more data, do we have space? */
    if (size >= SM_PAGE_SIZE) {
        peerDigestFetchAbort(fetch, buf, "digest cblock too big");
        return -1;
    }

    return 0; /* We need more data */
}
668
669 int
670 peerDigestSwapInMask(void *data, char *buf, ssize_t size)
671 {
672 DigestFetchState *fetch = (DigestFetchState *)data;
673 PeerDigest *pd;
674
675 pd = fetch->pd;
676 assert(pd->cd && pd->cd->mask);
677
678 /*
679 * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore!
680 * we need to do the copy ourselves!
681 */
682 memcpy(pd->cd->mask + fetch->mask_offset, buf, size);
683
684 /* NOTE! buf points to the middle of pd->cd->mask! */
685
686 if (peerDigestFetchedEnough(fetch, NULL, size, "peerDigestSwapInMask"))
687 return -1;
688
689 fetch->mask_offset += size;
690
691 if (fetch->mask_offset >= pd->cd->mask_size) {
692 debugs(72, 2, "peerDigestSwapInMask: Done! Got " <<
693 fetch->mask_offset << ", expected " << pd->cd->mask_size);
694 assert(fetch->mask_offset == pd->cd->mask_size);
695 assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask"));
696 return -1; /* XXX! */
697 }
698
699 /* We always read everything, so return size */
700 return size;
701 }
702
/*
 * Central exit-condition check shared by all state-machine steps.
 * Tests every way the fetch could be over (peer death, swap failure,
 * aborted entry, completed or useless digest) and, if one applies,
 * finishes the whole request via peerDigestReqFinish().
 * Returns non-zero when the fetch has been finished (caller must stop).
 */
static int
peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name)
{
    static const SBuf hostUnknown("<unknown>"); // peer host (if any)
    SBuf host = hostUnknown;

    PeerDigest *pd = NULL;
    const char *reason = NULL; /* reason for completion */
    const char *no_bug = NULL; /* successful completion if set */
    /* NOTE(review): fetch->pd is dereferenced here before the NULL check
     * below -- presumably fetch->pd can never really be NULL; confirm */
    const int pdcb_valid = cbdataReferenceValid(fetch->pd);
    const int pcb_valid = cbdataReferenceValid(fetch->pd->peer);

    /* test possible exiting conditions (the same for most steps!)
     * cases marked with '?!' should not happen */

    if (!reason) {
        if (!(pd = fetch->pd))
            reason = "peer digest disappeared?!";

#if DONT /* WHY NOT? /HNO */

        else if (!cbdataReferenceValid(pd))
            reason = "invalidated peer digest?!";

#endif

        else
            host = pd->host;
    }

    debugs(72, 6, step_name << ": peer " << host << ", offset: " <<
           fetch->offset << " size: " << size << ".");

    /* continue checking (with pd and host known and valid) */

    if (!reason) {
        if (!cbdataReferenceValid(pd->peer))
            reason = "peer disappeared";
        else if (size < 0)
            reason = "swap failure";
        else if (!fetch->entry)
            reason = "swap aborted?!";
        else if (EBIT_TEST(fetch->entry->flags, ENTRY_ABORTED))
            reason = "swap aborted";
    }

    /* continue checking (maybe-successful eof case) */
    if (!reason && !size) {
        if (!pd->cd)
            reason = "null digest?!";
        else if (fetch->mask_offset != pd->cd->mask_size)
            reason = "premature end of digest?!";
        else if (!peerDigestUseful(pd))
            reason = "useless digest";
        else
            reason = no_bug = "success";
    }

    /* finish if we have a reason */
    if (reason) {
        const int level = strstr(reason, "?!") ? 1 : 3;
        debugs(72, level, "" << step_name << ": peer " << host << ", exiting after '" << reason << "'");
        peerDigestReqFinish(fetch, buf,
                            1, pdcb_valid, pcb_valid, reason, !no_bug);
    } else {
        /* paranoid check */
        assert(pdcb_valid && pcb_valid);
    }

    return reason != NULL;
}
774
/* call this when all callback data is valid and fetch must be stopped but
 * no error has occurred (e.g. we received 304 reply and reuse old digest) */
static void
peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchStop: peer " << fetch->pd->host << ", reason: " << reason);
    /* all three cbdata flags valid, err = 0 (success) */
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 0);
}
784
/* call this when all callback data is valid but something bad happened */
static void
peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchAbort: peer " << fetch->pd->host << ", reason: " << reason);
    /* all three cbdata flags valid, err = 1 (failure) */
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1);
}
793
/* complete the digest transfer, update stats, unlock/release everything */
/*
 * fcb_valid/pdcb_valid/pcb_valid indicate which cbdata pointers (fetch,
 * fetch->pd, pd->peer) are still alive; each cleanup step runs only when
 * the data it touches is valid. err != 0 marks the attempt as failed.
 */
static void
peerDigestReqFinish(DigestFetchState * fetch, char *buf,
                    int fcb_valid, int pdcb_valid, int pcb_valid,
                    const char *reason, int err)
{
    assert(reason);

    /* must go before peerDigestPDFinish */

    if (pdcb_valid) {
        fetch->pd->flags.requested = false;
        fetch->pd->req_result = reason;
    }

    /* schedule next check if peer is still out there */
    if (pcb_valid) {
        PeerDigest *pd = fetch->pd;

        if (err) {
            /* failed: back off exponentially */
            pd->times.retry_delay = peerDigestIncDelay(pd);
            peerDigestSetCheck(pd, pd->times.retry_delay);
        } else {
            /* succeeded: next check when the fetched entry expires */
            pd->times.retry_delay = 0;
            peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry));
        }
    }

    /* note: order is significant */
    if (fcb_valid)
        peerDigestFetchSetStats(fetch);

    if (pdcb_valid)
        peerDigestPDFinish(fetch, pcb_valid, err);

    if (fcb_valid)
        peerDigestFetchFinish(fetch, err);
}
832
/* destroys digest if peer disappeared
 * must be called only when fetch and pd cbdata are valid */
static void
peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err)
{
    PeerDigest *pd = fetch->pd;
    const auto host = pd->host;
    /* roll this fetch's traffic counters into the digest's lifetime stats */
    pd->times.received = squid_curtime;
    pd->times.req_delay = fetch->resp_time;
    pd->stats.sent.kbytes += fetch->sent.bytes;
    pd->stats.recv.kbytes += fetch->recv.bytes;
    pd->stats.sent.msgs += fetch->sent.msg;
    pd->stats.recv.msgs += fetch->recv.msg;

    if (err) {
        debugs(72, DBG_IMPORTANT, "" << (pcb_valid ? "temporary " : "" ) << "disabling (" << pd->req_result << ") digest from " << host);

        /* drop the (possibly partial) in-memory digest */
        delete pd->cd;
        pd->cd = nullptr;

        pd->flags.usable = false;

        if (!pcb_valid)
            peerDigestNotePeerGone(pd);
    } else {
        assert(pcb_valid);

        pd->flags.usable = true;

        /* XXX: ugly condition, but how? */

        if (fetch->entry->store_status == STORE_OK)
            debugs(72, 2, "re-used old digest from " << host);
        else
            debugs(72, 2, "received valid digest from " << host);
    }

    /* release the fetch's pd reference (taken in the DigestFetchState ctor);
     * the DigestFetchState destructor asserts this has happened */
    cbdataReferenceDone(fetch->pd);
}
872
/* free fetch state structures
 * must be called only when fetch cbdata is valid */
static void
peerDigestFetchFinish(DigestFetchState * fetch, int err)
{
    assert(fetch->entry && fetch->request);

    /* release the old entry kept around for possible 304 reuse */
    if (fetch->old_entry) {
        debugs(72, 3, "peerDigestFetchFinish: deleting old entry");
        storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
        fetch->old_entry->releaseRequest();
        fetch->old_entry->unlock("peerDigestFetchFinish old");
        fetch->old_entry = NULL;
    }

    /* update global stats */
    statCounter.cd.kbytes_sent += fetch->sent.bytes;
    statCounter.cd.kbytes_recv += fetch->recv.bytes;
    statCounter.cd.msgs_sent += fetch->sent.msg;
    statCounter.cd.msgs_recv += fetch->recv.msg;

    /* the destructor unregisters the store client and unlocks the entry */
    delete fetch;
}
896
897 /* calculate fetch stats after completion */
898 static void
899 peerDigestFetchSetStats(DigestFetchState * fetch)
900 {
901 MemObject *mem;
902 assert(fetch->entry && fetch->request);
903
904 mem = fetch->entry->mem_obj;
905 assert(mem);
906
907 /* XXX: outgoing numbers are not precise */
908 /* XXX: we must distinguish between 304 hits and misses here */
909 fetch->sent.bytes = fetch->request->prefixLen();
910 /* XXX: this is slightly wrong: we don't KNOW that the entire memobject
911 * was fetched. We only know how big it is
912 */
913 fetch->recv.bytes = mem->size();
914 fetch->sent.msg = fetch->recv.msg = 1;
915 fetch->expires = fetch->entry->expires;
916 fetch->resp_time = squid_curtime - fetch->start_time;
917
918 debugs(72, 3, "peerDigestFetchFinish: recv " << fetch->recv.bytes <<
919 " bytes in " << (int) fetch->resp_time << " secs");
920
921 debugs(72, 3, "peerDigestFetchFinish: expires: " <<
922 (long int) fetch->expires << " (" << std::showpos <<
923 (int) (fetch->expires - squid_curtime) << "), lmt: " <<
924 std::noshowpos << (long int) fetch->entry->lastModified() << " (" <<
925 std::showpos << (int) (fetch->entry->lastModified() - squid_curtime) <<
926 ")");
927
928 }
929
/*
 * Parses and validates the digest control block received from a peer
 * (version, internal consistency, mask size, hash function count) and
 * (re)allocates pd->cd to match. Returns 1 on success, 0 on any error.
 */
static int
peerDigestSetCBlock(PeerDigest * pd, const char *buf)
{
    StoreDigestCBlock cblock;
    int freed_size = 0;  /* bytes released if the old digest was dropped */
    const auto host = pd->host;

    memcpy(&cblock, buf, sizeof(cblock));
    /* network -> host conversions */
    cblock.ver.current = ntohs(cblock.ver.current);
    cblock.ver.required = ntohs(cblock.ver.required);
    cblock.capacity = ntohl(cblock.capacity);
    cblock.count = ntohl(cblock.count);
    cblock.del_count = ntohl(cblock.del_count);
    cblock.mask_size = ntohl(cblock.mask_size);
    debugs(72, 2, "got digest cblock from " << host << "; ver: " <<
           (int) cblock.ver.current << " (req: " << (int) cblock.ver.required <<
           ")");

    debugs(72, 2, "\t size: " <<
           cblock.mask_size << " bytes, e-cnt: " <<
           cblock.count << ", e-util: " <<
           xpercentInt(cblock.count, cblock.capacity) << "%" );
    /* check version requirements (both ways) */

    if (cblock.ver.required > CacheDigestVer.current) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest requires version " <<
               cblock.ver.required << "; have: " << CacheDigestVer.current);

        return 0;
    }

    if (cblock.ver.current < CacheDigestVer.required) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest is version " <<
               cblock.ver.current << "; we require: " <<
               CacheDigestVer.required);

        return 0;
    }

    /* check consistency */
    if (cblock.ver.required > cblock.ver.current ||
            cblock.mask_size <= 0 || cblock.capacity <= 0 ||
            cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) {
        debugs(72, DBG_CRITICAL, "" << host << " digest cblock is corrupted.");
        return 0;
    }

    /* check consistency further: advertised mask size must match what the
     * capacity/bits-per-entry parameters imply */
    if ((size_t)cblock.mask_size != CacheDigest::CalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
        debugs(72, DBG_CRITICAL, host << " digest cblock is corrupted " <<
               "(mask size mismatch: " << cblock.mask_size << " ? " <<
               CacheDigest::CalcMaskSize(cblock.capacity, cblock.bits_per_entry)
               << ").");
        return 0;
    }

    /* there are some things we cannot do yet */
    if (cblock.hash_func_count != CacheDigestHashFuncCount) {
        debugs(72, DBG_CRITICAL, "" << host << " digest: unsupported #hash functions: " <<
               cblock.hash_func_count << " ? " << CacheDigestHashFuncCount << ".");
        return 0;
    }

    /*
     * no cblock bugs below this point
     */
    /* check size changes */
    if (pd->cd && cblock.mask_size != (ssize_t)pd->cd->mask_size) {
        debugs(72, 2, host << " digest changed size: " << cblock.mask_size <<
               " -> " << pd->cd->mask_size);
        freed_size = pd->cd->mask_size;
        delete pd->cd;
        pd->cd = nullptr;
    }

    if (!pd->cd) {
        debugs(72, 2, "creating " << host << " digest; size: " << cblock.mask_size << " (" <<
               std::showpos << (int) (cblock.mask_size - freed_size) << ") bytes");
        pd->cd = new CacheDigest(cblock.capacity, cblock.bits_per_entry);

        /* account only for net memory growth */
        if (cblock.mask_size >= freed_size)
            statCounter.cd.memory += (cblock.mask_size - freed_size);
    }

    assert(pd->cd);
    /* these assignments leave us in an inconsistent state until we finish reading the digest */
    pd->cd->count = cblock.count;
    pd->cd->del_count = cblock.del_count;
    return 1;
}
1021
1022 static int
1023 peerDigestUseful(const PeerDigest * pd)
1024 {
1025 /* TODO: we should calculate the prob of a false hit instead of bit util */
1026 const auto bit_util = pd->cd->usedMaskPercent();
1027
1028 if (bit_util > 65.0) {
1029 debugs(72, DBG_CRITICAL, "Warning: " << pd->host <<
1030 " peer digest has too many bits on (" << bit_util << "%).");
1031 return 0;
1032 }
1033
1034 return 1;
1035 }
1036
1037 static int
1038 saneDiff(time_t diff)
1039 {
1040 return abs((int) diff) > squid_curtime / 2 ? 0 : diff;
1041 }
1042
/*
 * Appends a human-readable report on this peer digest (timestamps, state
 * flags, traffic totals, and the digest structure itself) to StoreEntry e.
 */
void
peerDigestStatsReport(const PeerDigest * pd, StoreEntry * e)
{
/* helpers: render a flag as yes/no, and a timestamp with offsets from
 * now and from initialization */
#define f2s(flag) (pd->flags.flag ? "yes" : "no")
#define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
                                         ""#tm, (long int)pd->times.tm, \
                                         saneDiff(pd->times.tm - squid_curtime), \
                                         saneDiff(pd->times.tm - pd->times.initialized))

    assert(pd);

    auto host = pd->host;
    storeAppendPrintf(e, "\npeer digest from " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(host));

    cacheDigestGuessStatsReport(&pd->stats.guess, e, host);

    storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n");
    appendTime(initialized);
    appendTime(needed);
    appendTime(requested);
    appendTime(received);
    appendTime(next_check);

    storeAppendPrintf(e, "peer digest state:\n");
    storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n",
                      f2s(needed), f2s(usable), f2s(requested));
    storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n",
                      (int) pd->times.retry_delay);
    storeAppendPrintf(e, "\tlast request response time: %d secs\n",
                      (int) pd->times.req_delay);
    storeAppendPrintf(e, "\tlast request result: %s\n",
                      pd->req_result ? pd->req_result : "(none)");

    storeAppendPrintf(e, "\npeer digest traffic:\n");
    storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n",
                      pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb);
    storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n",
                      pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb);

    storeAppendPrintf(e, "\npeer digest structure:\n");

    if (pd->cd)
        cacheDigestReport(pd->cd, host, e);
    else
        storeAppendPrintf(e, "\tno in-memory copy\n");
}
1089
1090 #endif
1091