]> git.ipfire.org Git - thirdparty/squid.git/blob - src/peer_digest.cc
Maintenance: Removed most NULLs using modernize-use-nullptr (#1075)
[thirdparty/squid.git] / src / peer_digest.cc
1 /*
2 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 72 Peer Digest Routines */
10
11 #include "squid.h"
12 #if USE_CACHE_DIGESTS
13 #include "CacheDigest.h"
14 #include "CachePeer.h"
15 #include "event.h"
16 #include "FwdState.h"
17 #include "globals.h"
18 #include "HttpReply.h"
19 #include "HttpRequest.h"
20 #include "internal.h"
21 #include "MemObject.h"
22 #include "mime_header.h"
23 #include "neighbors.h"
24 #include "PeerDigest.h"
25 #include "Store.h"
26 #include "store_key_md5.h"
27 #include "StoreClient.h"
28 #include "tools.h"
29 #include "util.h"
30
/* local types */

/* local prototypes */
static time_t peerDigestIncDelay(const PeerDigest * pd);
static time_t peerDigestNewDelay(const StoreEntry * e);
static void peerDigestSetCheck(PeerDigest * pd, time_t delay);
static EVH peerDigestCheck;
static void peerDigestRequest(PeerDigest * pd);
static STCB peerDigestHandleReply;
static int peerDigestFetchReply(void *, char *, ssize_t);
// NOTE(review): the three SwapIn* parsers below have external linkage, unlike
// their sibling helpers -- presumably referenced outside this file (e.g. by
// tests); confirm before making them static.
int peerDigestSwapInHeaders(void *, char *, ssize_t);
int peerDigestSwapInCBlock(void *, char *, ssize_t);
int peerDigestSwapInMask(void *, char *, ssize_t);
static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err);
static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err);
static void peerDigestFetchFinish(DigestFetchState * fetch, int err);
static void peerDigestFetchSetStats(DigestFetchState * fetch);
static int peerDigestSetCBlock(PeerDigest * pd, const char *buf);
static int peerDigestUseful(const PeerDigest * pd);

/* local constants */

// Cache Digest protocol version we speak; peerDigestSetCBlock() compares the
// peer's cblock against both the .current and .required fields.
Version const CacheDigestVer = { 5, 3 };

#define StoreDigestCBlockSize sizeof(StoreDigestCBlock)

/* min interval for requesting digests from a given peer */
static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */
/* min interval for requesting digests (cumulative request stream) */
static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */

/* local vars */

static time_t pd_last_req_time = 0; /* last call to Check */
/* Binds this digest to its CachePeer and records the creation time.
 * Takes a cbdata lock on the peer; the matching unlock is in
 * peerDigestDestroy(), not in ~PeerDigest(). */
PeerDigest::PeerDigest(CachePeer * p)
{
    assert(p);

    /*
     * DPW 2007-04-12
     * Lock on to the peer here. The corresponding cbdataReferenceDone()
     * is in peerDigestDestroy().
     */
    peer = cbdataReference(p);
    /* if peer disappears, we will know it's name */
    host = p->host;

    times.initialized = squid_curtime;
}
83
CBDATA_CLASS_INIT(PeerDigest);

CBDATA_CLASS_INIT(DigestFetchState);

/* Per-fetch state for one digest download. Locks the PeerDigest (cbdata) and
 * the HttpRequest; both are released on completion (see peerDigestPDFinish()
 * and the destructor). Starts in DIGEST_READ_REPLY awaiting HTTP headers. */
DigestFetchState::DigestFetchState(PeerDigest *aPd, HttpRequest *req) :
    pd(cbdataReference(aPd)),
    entry(nullptr),
    old_entry(nullptr),
    sc(nullptr),
    old_sc(nullptr),
    request(req),
    offset(0),
    mask_offset(0),
    start_time(squid_curtime),
    resp_time(0),
    expires(0),
    bufofs(0),
    state(DIGEST_READ_REPLY)
{
    HTTPMSGLOCK(request);

    /* traffic counters start at zero; filled in by peerDigestFetchSetStats() */
    sent.msg = 0;
    sent.bytes = 0;

    recv.msg = 0;
    recv.bytes = 0;

    /* parse buffer starts empty */
    *buf = 0;
}
113
/* Releases the store client and entry locks and the request reference.
 * By this point old_entry must already have been cleaned up (see
 * peerDigestFetchFinish()) and the pd reference released (see
 * peerDigestPDFinish()) -- the final assert enforces the latter. */
DigestFetchState::~DigestFetchState()
{
    /* unlock everything */
    storeUnregister(sc, entry, this);

    entry->unlock("DigestFetchState destructed");
    entry = nullptr;

    HTTPMSGUNLOCK(request);

    assert(pd == nullptr);
}
126
/* allocate new peer digest, call Init, and lock everything */
void
peerDigestCreate(CachePeer * p)
{
    assert(p);

    PeerDigest *pd = new PeerDigest(p);

    // TODO: make CachePeer member a CbcPointer
    p->digest = cbdataReference(pd);

    // lock a reference to pd again to prevent the PeerDigest
    // disappearing during peerDigestDestroy() when
    // cbdataReferenceValidDone is called.
    // TODO test if it can be moved into peerDigestDestroy() or
    // if things can break earlier (eg CachePeer death).
    (void)cbdataReference(pd);
}
145
/* call Clean and free/unlock everything */
/* Counterpart of peerDigestCreate()/PeerDigest(): drops the cbdata lock on
 * the peer taken in the constructor and, if the peer is still alive, the
 * peer's own lock on this digest, then deletes the digest object. */
static void
peerDigestDestroy(PeerDigest * pd)
{
    void *p;
    assert(pd);
    void * peerTmp = pd->peer;

    /*
     * DPW 2007-04-12
     * We locked the peer in PeerDigest constructor, this is
     * where we unlock it.
     */
    if (cbdataReferenceValidDone(peerTmp, &p)) {
        // we locked the p->digest in peerDigestCreate()
        // this is where we unlock that
        cbdataReferenceDone(static_cast<CachePeer *>(p)->digest);
    }

    delete pd;
}
167
/* Frees the in-memory cache digest; all cbdata locks are handled by
 * peerDigestDestroy(), not here. */
PeerDigest::~PeerDigest()
{
    delete cd;
    // req_result pointer is not owned by us
}
173
/* called by peer to indicate that somebody actually needs this digest */
/* Marks the digest as needed and schedules an immediate validity check;
 * must be called at most once per digest (asserts !needed and no cd yet). */
void
peerDigestNeeded(PeerDigest * pd)
{
    assert(pd);
    assert(!pd->flags.needed);
    assert(!pd->cd);

    pd->flags.needed = true;
    pd->times.needed = squid_curtime;
    peerDigestSetCheck(pd, 0); /* check asap */
}
186
187 /* increment retry delay [after an unsuccessful attempt] */
188 static time_t
189 peerDigestIncDelay(const PeerDigest * pd)
190 {
191 assert(pd);
192 return pd->times.retry_delay > 0 ?
193 2 * pd->times.retry_delay : /* exponential backoff */
194 PeerDigestReqMinGap; /* minimal delay */
195 }
196
197 /* artificially increases Expires: setting to avoid race conditions
198 * returns the delay till that [increased] expiration time */
199 static time_t
200 peerDigestNewDelay(const StoreEntry * e)
201 {
202 assert(e);
203
204 if (e->expires > 0)
205 return e->expires + PeerDigestReqMinGap - squid_curtime;
206
207 return PeerDigestReqMinGap;
208 }
209
/* registers next digest verification */
/* Schedules peerDigestCheck() to run after `delay` seconds and records the
 * planned check time in pd->times.next_check. */
static void
peerDigestSetCheck(PeerDigest * pd, time_t delay)
{
    eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
    pd->times.next_check = squid_curtime + delay;
    debugs(72, 3, "peerDigestSetCheck: will check peer " << pd->host << " in " << delay << " secs");
}
218
219 /*
220 * called when peer is about to disappear or have already disappeared
221 */
222 void
223 peerDigestNotePeerGone(PeerDigest * pd)
224 {
225 if (pd->flags.requested) {
226 debugs(72, 2, "peerDigest: peer " << pd->host << " gone, will destroy after fetch.");
227 /* do nothing now, the fetching chain will notice and take action */
228 } else {
229 debugs(72, 2, "peerDigest: peer " << pd->host << " is gone, destroying now.");
230 peerDigestDestroy(pd);
231 }
232 }
233
/* callback for eventAdd() (with peer digest locked)
 * request new digest if our copy is too old or if we lack one;
 * schedule next check otherwise */
static void
peerDigestCheck(void *data)
{
    PeerDigest *pd = (PeerDigest *)data;
    time_t req_time;

    assert(!pd->flags.requested);

    pd->times.next_check = 0; /* unknown */

    /* the peer may have been removed while this event was pending */
    if (!cbdataReferenceValid(pd->peer)) {
        peerDigestNotePeerGone(pd);
        return;
    }

    debugs(72, 3, "peerDigestCheck: peer " << pd->peer->host << ":" << pd->peer->http_port);
    debugs(72, 3, "peerDigestCheck: time: " << squid_curtime <<
           ", last received: " << (long int) pd->times.received << " (" <<
           std::showpos << (int) (squid_curtime - pd->times.received) << ")");

    /* decide when we should send the request:
     * request now unless too close to other requests */
    req_time = squid_curtime;

    /* per-peer limit */

    if (req_time - pd->times.received < PeerDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close peer requests (" <<
               (int) (req_time - pd->times.received) << " < " <<
               (int) PeerDigestReqMinGap << " secs).");

        req_time = pd->times.received + PeerDigestReqMinGap;
    }

    /* global limit (rate-limits the combined request stream to all peers) */
    if (req_time - pd_last_req_time < GlobDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close requests (" <<
               (int) (req_time - pd_last_req_time) << " < " <<
               (int) GlobDigestReqMinGap << " secs).");

        req_time = pd_last_req_time + GlobDigestReqMinGap;
    }

    /* either fetch now or re-schedule ourselves for the computed time */
    if (req_time <= squid_curtime)
        peerDigestRequest(pd); /* will set pd->flags.requested */
    else
        peerDigestSetCheck(pd, req_time - squid_curtime);
}
287
/* ask store for a digest */
/* Builds the digest HTTP request, locates any cached copy (for IMS reuse),
 * creates the new store entry, kicks off forwarding to the peer, and
 * schedules the first storeClientCopy() into fetch->buf with
 * peerDigestHandleReply() as the callback. */
static void
peerDigestRequest(PeerDigest * pd)
{
    CachePeer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url = nullptr;
    HttpRequest *req;
    StoreIOBuffer tempBuffer;

    pd->req_result = nullptr;
    pd->flags.requested = true;

    /* compute future request components */

    /* a peer-configured digest_url overrides the well-known internal path */
    if (p->digest_url)
        url = xstrdup(p->digest_url);
    else
        url = xstrdup(internalRemoteUri(p->secure.encryptTransport, p->host, p->http_port, "/squid-internal-periodic/", SBuf(StoreDigestFileName)));
    debugs(72, 2, url);

    const auto mx = MasterXaction::MakePortless<XactionInitiator::initCacheDigest>();
    req = HttpRequest::FromUrlXXX(url, mx);

    assert(req);

    /* add custom headers */
    assert(!req->header.len);

    req->header.putStr(Http::HdrType::ACCEPT, StoreDigestMimeStr);

    req->header.putStr(Http::HdrType::ACCEPT, "text/html");

    /* forward configured peer credentials, except the special login modes */
    if (p->login &&
            p->login[0] != '*' &&
            strcmp(p->login, "PASS") != 0 &&
            strcmp(p->login, "PASSTHRU") != 0 &&
            strncmp(p->login, "NEGOTIATE",9) != 0 &&
            strcmp(p->login, "PROXYPASS") != 0) {
        req->url.userInfo(SBuf(p->login)); // XXX: performance regression make peer login SBuf as well.
    }
    /* create fetch state structure */
    DigestFetchState *fetch = new DigestFetchState(pd, req);

    /* update timestamps */
    pd->times.requested = squid_curtime;
    pd_last_req_time = squid_curtime;
    req->flags.cachable = true;

    /* the rest is based on clientReplyContext::processExpired() */
    req->flags.refresh = true;

    old_e = fetch->old_entry = storeGetPublicByRequest(req);

    // XXX: Missing a hittingRequiresCollapsing() && startCollapsingOn() check.
    if (old_e) {
        debugs(72, 5, "found old " << *old_e);

        /* keep the old entry around until we know whether the new fetch wins */
        old_e->lock("peerDigestRequest");
        old_e->ensureMemObject(url, url, req->method);

        fetch->old_sc = storeClientListAdd(old_e, fetch);
    }

    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    debugs(72, 5, "created " << *e);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    fetch->sc = storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */

    if (old_e)
        e->lastModified(old_e->lastModified());

    /* push towards peer cache */
    FwdState::fwdStart(Comm::ConnectionPointer(), e, req);

    /* request the first SM_PAGE_SIZE chunk into the fetch buffer */
    tempBuffer.offset = 0;

    tempBuffer.length = SM_PAGE_SIZE;

    tempBuffer.data = fetch->buf;

    storeClientCopy(fetch->sc, e, tempBuffer,
                    peerDigestHandleReply, fetch);

    safe_free(url);
}
375
/* Handle the data copying .. */

/*
 * This routine handles the copy data and then redirects the
 * copy to a bunch of subfunctions depending upon the copy state.
 * It also tracks the buffer offset and "seen", since I'm actually
 * not interested in rewriting everything to suit my little idea.
 */
static void
peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    int retsize = -1;   /* bytes consumed by the current parse step */
    digest_read_state_t prevstate;
    int newsize;

    assert(fetch->pd && receivedData.data);
    /* The existing code assumes that the received pointer is
     * where we asked the data to be put
     */
    assert(fetch->buf + fetch->bufofs == receivedData.data);

    /* Update the buffer size */
    fetch->bufofs += receivedData.length;

    assert(fetch->bufofs <= SM_PAGE_SIZE);

    /* If we've fetched enough, return */

    if (peerDigestFetchedEnough(fetch, fetch->buf, fetch->bufofs, "peerDigestHandleReply"))
        return;

    /* Call the right function based on the state */
    /* (Those functions will update the state if needed) */

    /* Give us a temporary reference. Some of the calls we make may
     * try to destroy the fetch structure, and we like to know if they
     * do
     */
    CbcPointer<DigestFetchState> tmpLock = fetch;

    /* Repeat this loop until we're out of data OR the state changes */
    /* (So keep going if the state has changed and we still have data */
    do {
        prevstate = fetch->state;

        switch (fetch->state) {

        case DIGEST_READ_REPLY:
            retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_HEADERS:
            retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_CBLOCK:
            retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_MASK:
            retsize = peerDigestSwapInMask(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_NONE:
            break;

        case DIGEST_READ_DONE:
            return;
            break;

        default:
            fatal("Bad digest transfer mode!\n");
        }

        /* a negative return means the step aborted/stopped the fetch */
        if (retsize < 0)
            return;

        /*
         * The returned size indicates how much of the buffer was read -
         * so move the remainder of the buffer to the beginning
         * and update the bufofs / bufsize
         */
        newsize = fetch->bufofs - retsize;

        memmove(fetch->buf, fetch->buf + retsize, fetch->bufofs - newsize);

        fetch->bufofs = newsize;

    } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);

    // Check for EOF here, thus giving the parser one extra run. We could avoid this overhead by
    // checking at the beginning of this function. However, in this case, we would have to require
    // that the parser does not regard EOF as a special condition (it is true now but may change
    // in the future).
    if (!receivedData.length) { // EOF
        peerDigestFetchAbort(fetch, fetch->buf, "premature end of digest reply");
        return;
    }

    /* Update the copy offset */
    fetch->offset += receivedData.length;

    /* Schedule another copy */
    if (cbdataReferenceValid(fetch)) {
        StoreIOBuffer tempBuffer;
        tempBuffer.offset = fetch->offset;
        tempBuffer.length = SM_PAGE_SIZE - fetch->bufofs;
        tempBuffer.data = fetch->buf + fetch->bufofs;
        storeClientCopy(fetch->sc, fetch->entry, tempBuffer,
                        peerDigestHandleReply, fetch);
    }
}
489
/* wait for full http headers to be received then parse them */
/*
 * This routine handles parsing the reply line.
 * If the reply line indicates an OK, the same data is thrown
 * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
 * we simply stop parsing.
 */
static int
peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd = fetch->pd;
    size_t hdr_size;
    assert(pd && buf);
    assert(!fetch->offset);

    assert(fetch->state == DIGEST_READ_REPLY);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
        return -1;

    /* parse only once the full header block has arrived */
    if ((hdr_size = headersEnd(buf, size))) {
        const auto &reply = fetch->entry->mem().freshestReply();
        const auto status = reply.sline.status();
        assert(status != Http::scNone);
        debugs(72, 3, "peerDigestFetchReply: " << pd->host << " status: " << status <<
               ", expires: " << (long int) reply.expires << " (" << std::showpos <<
               (int) (reply.expires - squid_curtime) << ")");

        /* this "if" is based on clientHandleIMSReply() */

        if (status == Http::scNotModified) {
            /* our old entry is fine */
            assert(fetch->old_entry);

            if (!fetch->old_entry->mem_obj->request)
                fetch->old_entry->mem_obj->request = fetch->entry->mem_obj->request;

            assert(fetch->old_entry->mem_obj->request);

            Store::Root().updateOnNotModified(fetch->old_entry, *fetch->entry);

            /* get rid of 304 reply; continue reading from the old entry */
            storeUnregister(fetch->sc, fetch->entry, fetch);

            fetch->entry->unlock("peerDigestFetchReply 304");

            fetch->entry = fetch->old_entry;

            fetch->old_entry = nullptr;

            /* preserve request -- we need its size to update counters */
            /* requestUnlink(r); */
            /* fetch->entry->mem_obj->request = NULL; */
        } else if (status == Http::scOkay) {
            /* get rid of old entry if any */

            if (fetch->old_entry) {
                debugs(72, 3, "peerDigestFetchReply: got new digest, releasing old one");
                storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
                fetch->old_entry->releaseRequest();
                fetch->old_entry->unlock("peerDigestFetchReply 200");
                fetch->old_entry = nullptr;
            }
        } else {
            /* some kind of a bug */
            peerDigestFetchAbort(fetch, buf, reply.sline.reason());
            return -1; /* XXX -1 will abort stuff in ReadReply! */
        }

        /* must have a ready-to-use store entry if we got here */
        /* can we stay with the old in-memory digest? */
        if (status == Http::scNotModified && fetch->pd->cd) {
            peerDigestFetchStop(fetch, buf, "Not modified");
            fetch->state = DIGEST_READ_DONE;
        } else {
            fetch->state = DIGEST_READ_HEADERS;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "reply header too big");
    }

    /* We don't want to actually ack that we've handled anything,
     * otherwise SwapInHeaders() won't get the reply line .. */
    return 0;
}
579
/* fetch headers from disk, pass on to SwapInCBlock */
/* Re-parses the stored HTTP headers of the digest entry; on a 200 advances
 * the state machine to DIGEST_READ_CBLOCK and returns the header size so the
 * caller can discard the consumed bytes. */
int
peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    size_t hdr_size;

    assert(fetch->state == DIGEST_READ_HEADERS);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
        return -1;

    assert(!fetch->offset);

    if ((hdr_size = headersEnd(buf, size))) {
        const auto &reply = fetch->entry->mem().freshestReply();
        const auto status = reply.sline.status();
        assert(status != Http::scNone);

        /* by now only a 200 should have survived peerDigestFetchReply() */
        if (status != Http::scOkay) {
            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
                   " status " << status << " got cached!");

            peerDigestFetchAbort(fetch, buf, "internal status error");
            return -1;
        }

        fetch->state = DIGEST_READ_CBLOCK;
        return hdr_size; /* Say how much data we read */
    }

    /* need more data, do we have space? */
    if (size >= SM_PAGE_SIZE) {
        peerDigestFetchAbort(fetch, buf, "stored header too big");
        return -1;
    }

    return 0; /* We need to read more to parse .. */
}
619
620 int
621 peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)
622 {
623 DigestFetchState *fetch = (DigestFetchState *)data;
624
625 assert(fetch->state == DIGEST_READ_CBLOCK);
626
627 if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock"))
628 return -1;
629
630 if (size >= (ssize_t)StoreDigestCBlockSize) {
631 PeerDigest *pd = fetch->pd;
632
633 assert(pd);
634 assert(fetch->entry->mem_obj);
635
636 if (peerDigestSetCBlock(pd, buf)) {
637 /* XXX: soon we will have variable header size */
638 /* switch to CD buffer and fetch digest guts */
639 buf = nullptr;
640 assert(pd->cd->mask);
641 fetch->state = DIGEST_READ_MASK;
642 return StoreDigestCBlockSize;
643 } else {
644 peerDigestFetchAbort(fetch, buf, "invalid digest cblock");
645 return -1;
646 }
647 }
648
649 /* need more data, do we have space? */
650 if (size >= SM_PAGE_SIZE) {
651 peerDigestFetchAbort(fetch, buf, "digest cblock too big");
652 return -1;
653 }
654
655 return 0; /* We need more data */
656 }
657
658 int
659 peerDigestSwapInMask(void *data, char *buf, ssize_t size)
660 {
661 DigestFetchState *fetch = (DigestFetchState *)data;
662 PeerDigest *pd;
663
664 pd = fetch->pd;
665 assert(pd->cd && pd->cd->mask);
666
667 /*
668 * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore!
669 * we need to do the copy ourselves!
670 */
671 memcpy(pd->cd->mask + fetch->mask_offset, buf, size);
672
673 /* NOTE! buf points to the middle of pd->cd->mask! */
674
675 if (peerDigestFetchedEnough(fetch, nullptr, size, "peerDigestSwapInMask"))
676 return -1;
677
678 fetch->mask_offset += size;
679
680 if (fetch->mask_offset >= pd->cd->mask_size) {
681 debugs(72, 2, "peerDigestSwapInMask: Done! Got " <<
682 fetch->mask_offset << ", expected " << pd->cd->mask_size);
683 assert(fetch->mask_offset == pd->cd->mask_size);
684 assert(peerDigestFetchedEnough(fetch, nullptr, 0, "peerDigestSwapInMask"));
685 return -1; /* XXX! */
686 }
687
688 /* We always read everything, so return size */
689 return size;
690 }
691
/* Central exit-condition check shared by all parse steps. Returns non-zero
 * (and finishes the request via peerDigestReqFinish()) when the fetch must
 * stop -- due to errors, peer/digest disappearance, or successful EOF. */
static int
peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name)
{
    static const SBuf hostUnknown("<unknown>"); // peer host (if any)
    SBuf host = hostUnknown;

    PeerDigest *pd = nullptr;
    const char *reason = nullptr; /* reason for completion */
    const char *no_bug = nullptr; /* successful completion if set */
    const int pdcb_valid = cbdataReferenceValid(fetch->pd);
    // NOTE(review): fetch->pd->peer is dereferenced here even though fetch->pd
    // validity is only being checked on the previous line -- looks fragile if
    // pd can really be invalid; confirm cbdata semantics.
    const int pcb_valid = cbdataReferenceValid(fetch->pd->peer);

    /* test possible exiting conditions (the same for most steps!)
     * cases marked with '?!' should not happen */

    if (!reason) {
        if (!(pd = fetch->pd))
            reason = "peer digest disappeared?!";
        else
            host = pd->host;
    }

    debugs(72, 6, step_name << ": peer " << host << ", offset: " <<
           fetch->offset << " size: " << size << ".");

    /* continue checking (with pd and host known and valid) */

    if (!reason) {
        if (!cbdataReferenceValid(pd->peer))
            reason = "peer disappeared";
        else if (size < 0)
            reason = "swap failure";
        else if (!fetch->entry)
            reason = "swap aborted?!";
        else if (EBIT_TEST(fetch->entry->flags, ENTRY_ABORTED))
            reason = "swap aborted";
    }

    /* continue checking (maybe-successful eof case) */
    if (!reason && !size) {
        if (!pd->cd)
            reason = "null digest?!";
        else if (fetch->mask_offset != pd->cd->mask_size)
            reason = "premature end of digest?!";
        else if (!peerDigestUseful(pd))
            reason = "useless digest";
        else
            reason = no_bug = "success";
    }

    /* finish if we have a reason */
    if (reason) {
        const int level = strstr(reason, "?!") ? 1 : 3;
        debugs(72, level, "" << step_name << ": peer " << host << ", exiting after '" << reason << "'");
        peerDigestReqFinish(fetch, buf,
                            1, pdcb_valid, pcb_valid, reason, !no_bug);
    } else {
        /* paranoid check */
        assert(pdcb_valid && pcb_valid);
    }

    return reason != nullptr;
}
755
/* call this when all callback data is valid and fetch must be stopped but
 * no error has occurred (e.g. we received 304 reply and reuse old digest) */
static void
peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchStop: peer " << fetch->pd->host << ", reason: " << reason);
    /* all three cbdata flags valid; err = 0 marks a clean stop */
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 0);
}
765
/* call this when all callback data is valid but something bad happened */
static void
peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchAbort: peer " << fetch->pd->host << ", reason: " << reason);
    /* all three cbdata flags valid; err = 1 marks a failed fetch */
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1);
}
774
/* complete the digest transfer, update stats, unlock/release everything */
/* The *_valid flags report which cbdata pointers are still alive:
 * fcb_valid = fetch, pdcb_valid = fetch->pd, pcb_valid = the peer.
 * Each cleanup step runs only when its owner is still valid. */
static void
peerDigestReqFinish(DigestFetchState * fetch, char * /* buf */,
                    int fcb_valid, int pdcb_valid, int pcb_valid,
                    const char *reason, int err)
{
    assert(reason);

    /* must go before peerDigestPDFinish */

    if (pdcb_valid) {
        fetch->pd->flags.requested = false;
        fetch->pd->req_result = reason;
    }

    /* schedule next check if peer is still out there */
    if (pcb_valid) {
        PeerDigest *pd = fetch->pd;

        if (err) {
            /* back off exponentially on failures */
            pd->times.retry_delay = peerDigestIncDelay(pd);
            peerDigestSetCheck(pd, pd->times.retry_delay);
        } else {
            pd->times.retry_delay = 0;
            peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry));
        }
    }

    /* note: order is significant */
    if (fcb_valid)
        peerDigestFetchSetStats(fetch);

    if (pdcb_valid)
        peerDigestPDFinish(fetch, pcb_valid, err);

    if (fcb_valid)
        peerDigestFetchFinish(fetch, err);
}
813
/* destroys digest if peer disappeared
 * must be called only when fetch and pd cbdata are valid */
static void
peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err)
{
    PeerDigest *pd = fetch->pd;
    const auto host = pd->host;
    /* roll per-fetch stats into the per-digest totals */
    pd->times.received = squid_curtime;
    pd->times.req_delay = fetch->resp_time;
    pd->stats.sent.kbytes += fetch->sent.bytes;
    pd->stats.recv.kbytes += fetch->recv.bytes;
    pd->stats.sent.msgs += fetch->sent.msg;
    pd->stats.recv.msgs += fetch->recv.msg;

    if (err) {
        debugs(72, DBG_IMPORTANT, "" << (pcb_valid ? "temporary " : "" ) << "disabling (" << pd->req_result << ") digest from " << host);

        /* discard any partially-built digest */
        delete pd->cd;
        pd->cd = nullptr;

        pd->flags.usable = false;

        if (!pcb_valid)
            peerDigestNotePeerGone(pd);
    } else {
        assert(pcb_valid);

        pd->flags.usable = true;

        /* XXX: ugly condition, but how? */

        /* STORE_OK here means the 304 path reused the previously cached entry */
        if (fetch->entry->store_status == STORE_OK)
            debugs(72, 2, "re-used old digest from " << host);
        else
            debugs(72, 2, "received valid digest from " << host);
    }

    /* release the lock taken in the DigestFetchState constructor;
     * the destructor asserts that this happened */
    cbdataReferenceDone(fetch->pd);
}
853
/* free fetch state structures
 * must be called only when fetch cbdata is valid */
static void
peerDigestFetchFinish(DigestFetchState * fetch, int /* err */)
{
    assert(fetch->entry && fetch->request);

    /* drop the superseded cached digest entry, if it survived this far */
    if (fetch->old_entry) {
        debugs(72, 3, "peerDigestFetchFinish: deleting old entry");
        storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
        fetch->old_entry->releaseRequest();
        fetch->old_entry->unlock("peerDigestFetchFinish old");
        fetch->old_entry = nullptr;
    }

    /* update global stats */
    statCounter.cd.kbytes_sent += fetch->sent.bytes;
    statCounter.cd.kbytes_recv += fetch->recv.bytes;
    statCounter.cd.msgs_sent += fetch->sent.msg;
    statCounter.cd.msgs_recv += fetch->recv.msg;

    /* the destructor unregisters the store client and unlocks the entry */
    delete fetch;
}
877
878 /* calculate fetch stats after completion */
879 static void
880 peerDigestFetchSetStats(DigestFetchState * fetch)
881 {
882 MemObject *mem;
883 assert(fetch->entry && fetch->request);
884
885 mem = fetch->entry->mem_obj;
886 assert(mem);
887
888 /* XXX: outgoing numbers are not precise */
889 /* XXX: we must distinguish between 304 hits and misses here */
890 fetch->sent.bytes = fetch->request->prefixLen();
891 /* XXX: this is slightly wrong: we don't KNOW that the entire memobject
892 * was fetched. We only know how big it is
893 */
894 fetch->recv.bytes = mem->size();
895 fetch->sent.msg = fetch->recv.msg = 1;
896 fetch->expires = fetch->entry->expires;
897 fetch->resp_time = squid_curtime - fetch->start_time;
898
899 debugs(72, 3, "peerDigestFetchFinish: recv " << fetch->recv.bytes <<
900 " bytes in " << (int) fetch->resp_time << " secs");
901
902 debugs(72, 3, "peerDigestFetchFinish: expires: " <<
903 (long int) fetch->expires << " (" << std::showpos <<
904 (int) (fetch->expires - squid_curtime) << "), lmt: " <<
905 std::noshowpos << (long int) fetch->entry->lastModified() << " (" <<
906 std::showpos << (int) (fetch->entry->lastModified() - squid_curtime) <<
907 ")");
908
909 }
910
/* Validates the received digest control block and (re)allocates pd->cd to
 * match its advertised geometry. Returns 1 on success, 0 on any validation
 * failure (version mismatch, corrupt fields, unsupported hash count). */
static int
peerDigestSetCBlock(PeerDigest * pd, const char *buf)
{
    StoreDigestCBlock cblock;
    int freed_size = 0;
    const auto host = pd->host;

    /* buf may be unaligned; copy into a local struct before field access */
    memcpy(&cblock, buf, sizeof(cblock));
    /* network -> host conversions */
    cblock.ver.current = ntohs(cblock.ver.current);
    cblock.ver.required = ntohs(cblock.ver.required);
    cblock.capacity = ntohl(cblock.capacity);
    cblock.count = ntohl(cblock.count);
    cblock.del_count = ntohl(cblock.del_count);
    cblock.mask_size = ntohl(cblock.mask_size);
    debugs(72, 2, "got digest cblock from " << host << "; ver: " <<
           (int) cblock.ver.current << " (req: " << (int) cblock.ver.required <<
           ")");

    debugs(72, 2, "\t size: " <<
           cblock.mask_size << " bytes, e-cnt: " <<
           cblock.count << ", e-util: " <<
           xpercentInt(cblock.count, cblock.capacity) << "%" );
    /* check version requirements (both ways) */

    if (cblock.ver.required > CacheDigestVer.current) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest requires version " <<
               cblock.ver.required << "; have: " << CacheDigestVer.current);

        return 0;
    }

    if (cblock.ver.current < CacheDigestVer.required) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest is version " <<
               cblock.ver.current << "; we require: " <<
               CacheDigestVer.required);

        return 0;
    }

    /* check consistency */
    if (cblock.ver.required > cblock.ver.current ||
            cblock.mask_size <= 0 || cblock.capacity <= 0 ||
            cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) {
        debugs(72, DBG_CRITICAL, "" << host << " digest cblock is corrupted.");
        return 0;
    }

    /* check consistency further: advertised mask size must match geometry */
    if ((size_t)cblock.mask_size != CacheDigest::CalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
        debugs(72, DBG_CRITICAL, host << " digest cblock is corrupted " <<
               "(mask size mismatch: " << cblock.mask_size << " ? " <<
               CacheDigest::CalcMaskSize(cblock.capacity, cblock.bits_per_entry)
               << ").");
        return 0;
    }

    /* there are some things we cannot do yet */
    if (cblock.hash_func_count != CacheDigestHashFuncCount) {
        debugs(72, DBG_CRITICAL, "ERROR: " << host << " digest: unsupported #hash functions: " <<
               cblock.hash_func_count << " ? " << CacheDigestHashFuncCount << ".");
        return 0;
    }

    /*
     * no cblock bugs below this point
     */
    /* check size changes: a resized digest must be rebuilt from scratch */
    if (pd->cd && cblock.mask_size != (ssize_t)pd->cd->mask_size) {
        debugs(72, 2, host << " digest changed size: " << cblock.mask_size <<
               " -> " << pd->cd->mask_size);
        freed_size = pd->cd->mask_size;
        delete pd->cd;
        pd->cd = nullptr;
    }

    if (!pd->cd) {
        debugs(72, 2, "creating " << host << " digest; size: " << cblock.mask_size << " (" <<
               std::showpos << (int) (cblock.mask_size - freed_size) << ") bytes");
        pd->cd = new CacheDigest(cblock.capacity, cblock.bits_per_entry);

        if (cblock.mask_size >= freed_size)
            statCounter.cd.memory += (cblock.mask_size - freed_size);
    }

    assert(pd->cd);
    /* these assignments leave us in an inconsistent state until we finish reading the digest */
    pd->cd->count = cblock.count;
    pd->cd->del_count = cblock.del_count;
    return 1;
}
1002
1003 static int
1004 peerDigestUseful(const PeerDigest * pd)
1005 {
1006 /* TODO: we should calculate the prob of a false hit instead of bit util */
1007 const auto bit_util = pd->cd->usedMaskPercent();
1008
1009 if (bit_util > 65.0) {
1010 debugs(72, DBG_CRITICAL, "WARNING: " << pd->host <<
1011 " peer digest has too many bits on (" << bit_util << "%).");
1012 return 0;
1013 }
1014
1015 return 1;
1016 }
1017
1018 static int
1019 saneDiff(time_t diff)
1020 {
1021 return abs((int) diff) > squid_curtime / 2 ? 0 : diff;
1022 }
1023
/* Appends a human-readable digest status report (timestamps, flags, traffic
 * counters, and the digest structure itself) to the given store entry; used
 * by the cache manager interface. */
void
peerDigestStatsReport(const PeerDigest * pd, StoreEntry * e)
{
/* helper macros: flag-to-string and one formatted timestamp row */
#define f2s(flag) (pd->flags.flag ? "yes" : "no")
#define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
    ""#tm, (long int)pd->times.tm, \
    saneDiff(pd->times.tm - squid_curtime), \
    saneDiff(pd->times.tm - pd->times.initialized))

    assert(pd);

    auto host = pd->host;
    storeAppendPrintf(e, "\npeer digest from " SQUIDSBUFPH "\n", SQUIDSBUFPRINT(host));

    cacheDigestGuessStatsReport(&pd->stats.guess, e, host);

    storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n");
    appendTime(initialized);
    appendTime(needed);
    appendTime(requested);
    appendTime(received);
    appendTime(next_check);

    storeAppendPrintf(e, "peer digest state:\n");
    storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n",
                      f2s(needed), f2s(usable), f2s(requested));
    storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n",
                      (int) pd->times.retry_delay);
    storeAppendPrintf(e, "\tlast request response time: %d secs\n",
                      (int) pd->times.req_delay);
    storeAppendPrintf(e, "\tlast request result: %s\n",
                      pd->req_result ? pd->req_result : "(none)");

    storeAppendPrintf(e, "\npeer digest traffic:\n");
    storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n",
                      pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb);
    storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n",
                      pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb);

    storeAppendPrintf(e, "\npeer digest structure:\n");

    if (pd->cd)
        cacheDigestReport(pd->cd, host, e);
    else
        storeAppendPrintf(e, "\tno in-memory copy\n");
}
1070
1071 #endif
1072