2 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
9 /* DEBUG: section 72 Peer Digest Routines */
13 #include "CacheDigest.h"
14 #include "CachePeer.h"
18 #include "HttpReply.h"
19 #include "HttpRequest.h"
21 #include "MemObject.h"
22 #include "mime_header.h"
23 #include "neighbors.h"
24 #include "PeerDigest.h"
26 #include "store_key_md5.h"
27 #include "StoreClient.h"
// NOTE(review): this view of the file is fragmentary — statements are split
// across lines and interior lines are missing. Code below left byte-identical;
// comments only.
//
// Forward declarations for the module-local helpers that drive the peer
// cache-digest fetch state machine, followed by module constants.
// peerDigestSwapInHeaders/CBlock/Mask are declared non-static here while the
// other helpers are static — presumably exposed for external/unit use; confirm.
33 /* local prototypes */
34 static time_t peerDigestIncDelay(const PeerDigest
* pd
);
35 static time_t peerDigestNewDelay(const StoreEntry
* e
);
36 static void peerDigestSetCheck(PeerDigest
* pd
, time_t delay
);
37 static EVH peerDigestCheck
;
38 static void peerDigestRequest(PeerDigest
* pd
);
39 static STCB peerDigestHandleReply
;
40 static int peerDigestFetchReply(void *, char *, ssize_t
);
41 int peerDigestSwapInHeaders(void *, char *, ssize_t
);
42 int peerDigestSwapInCBlock(void *, char *, ssize_t
);
43 int peerDigestSwapInMask(void *, char *, ssize_t
);
44 static int peerDigestFetchedEnough(DigestFetchState
* fetch
, char *buf
, ssize_t size
, const char *step_name
);
45 static void peerDigestFetchStop(DigestFetchState
* fetch
, char *buf
, const char *reason
);
46 static void peerDigestFetchAbort(DigestFetchState
* fetch
, char *buf
, const char *reason
);
47 static void peerDigestReqFinish(DigestFetchState
* fetch
, char *buf
, int, int, int, const char *reason
, int err
);
48 static void peerDigestPDFinish(DigestFetchState
* fetch
, int pcb_valid
, int err
);
49 static void peerDigestFetchFinish(DigestFetchState
* fetch
, int err
);
50 static void peerDigestFetchSetStats(DigestFetchState
* fetch
);
51 static int peerDigestSetCBlock(PeerDigest
* pd
, const char *buf
);
52 static int peerDigestUseful(const PeerDigest
* pd
);
// Cache digest protocol version we speak: current 5, minimum required 3.
55 Version
const CacheDigestVer
= { 5, 3 };
57 #define StoreDigestCBlockSize sizeof(StoreDigestCBlock)
59 /* min interval for requesting digests from a given peer */
60 static const time_t PeerDigestReqMinGap
= 5 * 60; /* seconds */
61 /* min interval for requesting digests (cumulative request stream) */
62 static const time_t GlobDigestReqMinGap
= 1 * 60; /* seconds */
// Timestamp of the most recent digest request issued to ANY peer; used by
// peerDigestCheck() to enforce GlobDigestReqMinGap across all peers.
66 static time_t pd_last_req_time
= 0; /* last call to Check */
// Constructor: take a cbdata reference on the owning CachePeer (released in
// peerDigestDestroy(), per the comment below) and record the creation time.
// NOTE(review): interior lines of this constructor are missing from this view;
// code left byte-identical.
68 PeerDigest::PeerDigest(CachePeer
* p
)
74 * Lock on to the peer here. The corresponding cbdataReferenceDone()
75 * is in peerDigestDestroy().
77 peer
= cbdataReference(p
);
78 /* if peer disappears, we will know it's name */
81 times
.initialized
= squid_curtime
;
// Register both classes with Squid's cbdata reference-counting framework so
// instances can be tracked via cbdataReference()/cbdataReferenceValid().
84 CBDATA_CLASS_INIT(PeerDigest
);
86 CBDATA_CLASS_INIT(DigestFetchState
);
// Constructor: take a cbdata reference on the PeerDigest, note the start time,
// begin in the DIGEST_READ_REPLY parse state, and lock the HttpRequest.
// NOTE(review): several initializer-list members are missing from this view;
// code left byte-identical.
88 DigestFetchState::DigestFetchState(PeerDigest
*aPd
, HttpRequest
*req
) :
89 pd(cbdataReference(aPd
)),
97 start_time(squid_curtime
),
101 state(DIGEST_READ_REPLY
)
103 HTTPMSGLOCK(request
);
// Destructor: detach the store client, unlock the StoreEntry, unlock the
// request, and assert the PeerDigest reference was already released
// (pd must be nulled elsewhere — see cbdataReferenceDone in peerDigestPDFinish).
114 DigestFetchState::~DigestFetchState()
116 /* unlock everything */
117 storeUnregister(sc
, entry
, this);
119 entry
->unlock("DigestFetchState destructed");
122 HTTPMSGUNLOCK(request
);
124 assert(pd
== nullptr);
// Create a PeerDigest for CachePeer p, publish it as p->digest (cbdata
// reference), and take one extra self-reference whose purpose is documented
// inline below.
127 /* allocate new peer digest, call Init, and lock everything */
129 peerDigestCreate(CachePeer
* p
)
133 PeerDigest
*pd
= new PeerDigest(p
);
135 // TODO: make CachePeer member a CbcPointer
136 p
->digest
= cbdataReference(pd
);
138 // lock a reference to pd again to prevent the PeerDigest
139 // disappearing during peerDigestDestroy() when
140 // cbdataReferenceValidDone is called.
141 // TODO test if it can be moved into peerDigestDestroy() or
142 // if things can break earlier (eg CachePeer death).
143 (void)cbdataReference(pd
);
// Tear down a PeerDigest: release the peer lock taken in the constructor and,
// if the peer is still valid, drop the p->digest reference taken in
// peerDigestCreate(). NOTE(review): trailing statements of this function are
// missing from this view; code left byte-identical.
146 /* call Clean and free/unlock everything */
148 peerDigestDestroy(PeerDigest
* pd
)
152 void * peerTmp
= pd
->peer
;
156 * We locked the peer in PeerDigest constructor, this is
157 * where we unlock it.
159 if (cbdataReferenceValidDone(peerTmp
, &p
)) {
160 // we locked the p->digest in peerDigestCreate()
161 // this is where we unlock that
162 cbdataReferenceDone(static_cast<CachePeer
*>(p
)->digest
);
// Destructor. NOTE(review): the body is missing from this view; only the
// signature and the ownership note below are visible.
168 PeerDigest::~PeerDigest()
171 // req_result pointer is not owned by us
// Mark the digest as needed (must not already be needed), stamp the time, and
// schedule an immediate verification via peerDigestSetCheck(pd, 0).
174 /* called by peer to indicate that somebody actually needs this digest */
176 peerDigestNeeded(PeerDigest
* pd
)
179 assert(!pd
->flags
.needed
);
182 pd
->flags
.needed
= true;
183 pd
->times
.needed
= squid_curtime
;
184 peerDigestSetCheck(pd
, 0); /* check asap */
// Compute the next retry delay: double the previous delay (exponential
// backoff) or fall back to PeerDigestReqMinGap when no delay is set yet.
187 /* increment retry delay [after an unsuccessful attempt] */
189 peerDigestIncDelay(const PeerDigest
* pd
)
192 return pd
->times
.retry_delay
> 0 ?
193 2 * pd
->times
.retry_delay
: /* exponential backoff */
194 PeerDigestReqMinGap
; /* minimal delay */
// Delay until a padded expiration time for entry e, or the minimum gap.
// NOTE(review): the condition selecting between the two return statements is
// missing from this view (presumably tests e->expires > 0 — confirm); code
// left byte-identical.
197 /* artificially increases Expires: setting to avoid race conditions
198 * returns the delay till that [increased] expiration time */
200 peerDigestNewDelay(const StoreEntry
* e
)
205 return e
->expires
+ PeerDigestReqMinGap
- squid_curtime
;
207 return PeerDigestReqMinGap
;
// Schedule peerDigestCheck() to run for pd after `delay` seconds via the
// event queue, and record the absolute next-check time for reporting.
210 /* registers next digest verification */
212 peerDigestSetCheck(PeerDigest
* pd
, time_t delay
)
214 eventAdd("peerDigestCheck", peerDigestCheck
, pd
, (double) delay
, 1);
215 pd
->times
.next_check
= squid_curtime
+ delay
;
216 debugs(72, 3, "peerDigestSetCheck: will check peer " << pd
->host
<< " in " << delay
<< " secs");
// React to the owning peer disappearing: if a fetch is in flight, defer
// destruction to the fetch chain; otherwise destroy the digest immediately.
220 * called when peer is about to disappear or have already disappeared
223 peerDigestNotePeerGone(PeerDigest
* pd
)
225 if (pd
->flags
.requested
) {
226 debugs(72, 2, "peerDigest: peer " << pd
->host
<< " gone, will destroy after fetch.");
227 /* do nothing now, the fetching chain will notice and take action */
229 debugs(72, 2, "peerDigest: peer " << pd
->host
<< " is gone, destroying now.");
230 peerDigestDestroy(pd
);
// Event-queue callback: decide whether to fetch a fresh digest now.
// Destroys the digest if the peer vanished; otherwise computes the earliest
// allowed request time, throttled per-peer (PeerDigestReqMinGap) and globally
// (GlobDigestReqMinGap), then either requests immediately or reschedules.
234 /* callback for eventAdd() (with peer digest locked)
235 * request new digest if our copy is too old or if we lack one;
236 * schedule next check otherwise */
238 peerDigestCheck(void *data
)
240 PeerDigest
*pd
= (PeerDigest
*)data
;
243 assert(!pd
->flags
.requested
);
245 pd
->times
.next_check
= 0; /* unknown */
// Peer may have been destroyed since this event was queued.
247 if (!cbdataReferenceValid(pd
->peer
)) {
248 peerDigestNotePeerGone(pd
);
252 debugs(72, 3, "cache_peer " << *pd
->peer
);
253 debugs(72, 3, "peerDigestCheck: time: " << squid_curtime
<<
254 ", last received: " << (long int) pd
->times
.received
<< " (" <<
255 std::showpos
<< (int) (squid_curtime
- pd
->times
.received
) << ")");
257 /* decide when we should send the request:
258 * request now unless too close to other requests */
259 req_time
= squid_curtime
;
// Per-peer throttle: do not re-request within PeerDigestReqMinGap of the
// last digest received from this peer.
263 if (req_time
- pd
->times
.received
< PeerDigestReqMinGap
) {
264 debugs(72, 2, "peerDigestCheck: " << pd
->host
<<
265 ", avoiding close peer requests (" <<
266 (int) (req_time
- pd
->times
.received
) << " < " <<
267 (int) PeerDigestReqMinGap
<< " secs).");
269 req_time
= pd
->times
.received
+ PeerDigestReqMinGap
;
// Global throttle: space digest requests across all peers by
// GlobDigestReqMinGap.
273 if (req_time
- pd_last_req_time
< GlobDigestReqMinGap
) {
274 debugs(72, 2, "peerDigestCheck: " << pd
->host
<<
275 ", avoiding close requests (" <<
276 (int) (req_time
- pd_last_req_time
) << " < " <<
277 (int) GlobDigestReqMinGap
<< " secs).");
279 req_time
= pd_last_req_time
+ GlobDigestReqMinGap
;
282 if (req_time
<= squid_curtime
)
283 peerDigestRequest(pd
); /* will set pd->flags.requested */
285 peerDigestSetCheck(pd
, req_time
- squid_curtime
);
// Kick off the actual digest fetch: build the digest URL (configured
// digest_url or the internal /squid-internal-periodic/ URI), create the
// HttpRequest with Accept headers and optional peer login credentials,
// allocate a DigestFetchState, set up old/new StoreEntries (IMS-style
// refresh), start forwarding, and schedule the first storeClientCopy into
// fetch->buf with peerDigestHandleReply as the callback.
288 /* ask store for a digest */
290 peerDigestRequest(PeerDigest
* pd
)
292 CachePeer
*p
= pd
->peer
;
293 StoreEntry
*e
, *old_e
;
296 StoreIOBuffer tempBuffer
;
298 pd
->req_result
= nullptr;
299 pd
->flags
.requested
= true;
301 /* compute future request components */
// Either a peer-configured digest URL ...
304 url
= xstrdup(p
->digest_url
);
// ... or the well-known internal periodic digest URI.
306 url
= xstrdup(internalRemoteUri(p
->secure
.encryptTransport
, p
->host
, p
->http_port
, "/squid-internal-periodic/", SBuf(StoreDigestFileName
)));
309 const auto mx
= MasterXaction::MakePortless
<XactionInitiator::initCacheDigest
>();
310 req
= HttpRequest::FromUrlXXX(url
, mx
);
314 /* add custom headers */
315 assert(!req
->header
.len
);
317 req
->header
.putStr(Http::HdrType::ACCEPT
, StoreDigestMimeStr
);
319 req
->header
.putStr(Http::HdrType::ACCEPT
, "text/html");
// Forward literal peer login credentials, but not the special tokens
// (*, PASS, PASSTHRU, NEGOTIATE..., PROXYPASS) that mean something else.
322 p
->login
[0] != '*' &&
323 strcmp(p
->login
, "PASS") != 0 &&
324 strcmp(p
->login
, "PASSTHRU") != 0 &&
325 strncmp(p
->login
, "NEGOTIATE",9) != 0 &&
326 strcmp(p
->login
, "PROXYPASS") != 0) {
327 req
->url
.userInfo(SBuf(p
->login
)); // XXX: performance regression make peer login SBuf as well.
329 /* create fetch state structure */
330 DigestFetchState
*fetch
= new DigestFetchState(pd
, req
);
332 /* update timestamps */
333 pd
->times
.requested
= squid_curtime
;
334 pd_last_req_time
= squid_curtime
;
335 req
->flags
.cachable
.support(); // prevent RELEASE_REQUEST in storeCreateEntry()
337 /* the rest is based on clientReplyContext::processExpired() */
338 req
->flags
.refresh
= true;
// Look up any previously cached digest entry; kept (locked) so a 304 reply
// can reuse it in peerDigestFetchReply().
340 old_e
= fetch
->old_entry
= storeGetPublicByRequest(req
);
342 // XXX: Missing a hittingRequiresCollapsing() && startCollapsingOn() check.
344 debugs(72, 5, "found old " << *old_e
);
346 old_e
->lock("peerDigestRequest");
347 old_e
->ensureMemObject(url
, url
, req
->method
);
349 fetch
->old_sc
= storeClientListAdd(old_e
, fetch
);
352 e
= fetch
->entry
= storeCreateEntry(url
, url
, req
->flags
, req
->method
);
353 debugs(72, 5, "created " << *e
);
354 assert(EBIT_TEST(e
->flags
, KEY_PRIVATE
));
355 fetch
->sc
= storeClientListAdd(e
, fetch
);
356 /* set lastmod to trigger IMS request if possible */
359 e
->lastModified(old_e
->lastModified());
361 /* push towards peer cache */
362 FwdState::fwdStart(Comm::ConnectionPointer(), e
, req
);
364 tempBuffer
.offset
= 0;
366 tempBuffer
.length
= SM_PAGE_SIZE
;
368 tempBuffer
.data
= fetch
->buf
;
370 storeClientCopy(fetch
->sc
, e
, tempBuffer
,
371 peerDigestHandleReply
, fetch
);
// storeClientCopy callback driving the digest parse state machine.
// Accumulates data into fetch->buf, dispatches on fetch->state to the
// appropriate parser (reply line -> headers -> cblock -> mask), shifts any
// unconsumed bytes to the front of the buffer, loops while the state keeps
// advancing and data remains, aborts on EOF, then schedules the next copy.
376 /* Handle the data copying .. */
379 * This routine handles the copy data and then redirects the
380 * copy to a bunch of subfunctions depending upon the copy state.
381 * It also tracks the buffer offset and "seen", since I'm actually
382 * not interested in rewriting everything to suit my little idea.
385 peerDigestHandleReply(void *data
, StoreIOBuffer receivedData
)
387 DigestFetchState
*fetch
= (DigestFetchState
*)data
;
389 digest_read_state_t prevstate
;
392 assert(fetch
->pd
&& receivedData
.data
);
393 /* The existing code assumes that the received pointer is
394 * where we asked the data to be put
396 assert(fetch
->buf
+ fetch
->bufofs
== receivedData
.data
);
398 /* Update the buffer size */
399 fetch
->bufofs
+= receivedData
.length
;
401 assert(fetch
->bufofs
<= SM_PAGE_SIZE
);
403 /* If we've fetched enough, return */
405 if (peerDigestFetchedEnough(fetch
, fetch
->buf
, fetch
->bufofs
, "peerDigestHandleReply"))
408 /* Call the right function based on the state */
409 /* (Those functions will update the state if needed) */
411 /* Give us a temporary reference. Some of the calls we make may
412 * try to destroy the fetch structure, and we like to know if they
415 CbcPointer
<DigestFetchState
> tmpLock
= fetch
;
417 /* Repeat this loop until we're out of data OR the state changes */
418 /* (So keep going if the state has changed and we still have data */
420 prevstate
= fetch
->state
;
422 switch (fetch
->state
) {
424 case DIGEST_READ_REPLY
:
425 retsize
= peerDigestFetchReply(fetch
, fetch
->buf
, fetch
->bufofs
);
428 case DIGEST_READ_HEADERS
:
429 retsize
= peerDigestSwapInHeaders(fetch
, fetch
->buf
, fetch
->bufofs
);
432 case DIGEST_READ_CBLOCK
:
433 retsize
= peerDigestSwapInCBlock(fetch
, fetch
->buf
, fetch
->bufofs
);
436 case DIGEST_READ_MASK
:
437 retsize
= peerDigestSwapInMask(fetch
, fetch
->buf
, fetch
->bufofs
);
440 case DIGEST_READ_NONE
:
443 case DIGEST_READ_DONE
:
448 fatal("Bad digest transfer mode!\n");
455 * The returned size indicates how much of the buffer was read -
456 * so move the remainder of the buffer to the beginning
457 * and update the bufofs / bufsize
459 newsize
= fetch
->bufofs
- retsize
;
// Shift the unconsumed tail of the buffer down to offset 0.
461 memmove(fetch
->buf
, fetch
->buf
+ retsize
, fetch
->bufofs
- newsize
);
463 fetch
->bufofs
= newsize
;
465 } while (cbdataReferenceValid(fetch
) && prevstate
!= fetch
->state
&& fetch
->bufofs
> 0);
467 // Check for EOF here, thus giving the parser one extra run. We could avoid this overhead by
468 // checking at the beginning of this function. However, in this case, we would have to require
469 // that the parser does not regard EOF as a special condition (it is true now but may change
471 if (!receivedData
.length
) { // EOF
472 peerDigestFetchAbort(fetch
, fetch
->buf
, "premature end of digest reply");
476 /* Update the copy offset */
477 fetch
->offset
+= receivedData
.length
;
479 /* Schedule another copy */
480 if (cbdataReferenceValid(fetch
)) {
481 StoreIOBuffer tempBuffer
;
482 tempBuffer
.offset
= fetch
->offset
;
483 tempBuffer
.length
= SM_PAGE_SIZE
- fetch
->bufofs
;
484 tempBuffer
.data
= fetch
->buf
+ fetch
->bufofs
;
485 storeClientCopy(fetch
->sc
, fetch
->entry
, tempBuffer
,
486 peerDigestHandleReply
, fetch
);
// DIGEST_READ_REPLY state handler: wait until full HTTP headers are buffered,
// then branch on status. 304: keep and update the old entry, drop the fresh
// one, and (if an in-memory digest exists) finish successfully. 200: release
// the old entry and advance to DIGEST_READ_HEADERS. Anything else: abort.
// Returns 0 so SwapInHeaders can re-parse the same bytes (see tail comment).
490 /* wait for full http headers to be received then parse them */
492 * This routine handles parsing the reply line.
493 * If the reply line indicates an OK, the same data is thrown
494 * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
495 * we simply stop parsing.
498 peerDigestFetchReply(void *data
, char *buf
, ssize_t size
)
500 DigestFetchState
*fetch
= (DigestFetchState
*)data
;
501 PeerDigest
*pd
= fetch
->pd
;
504 assert(!fetch
->offset
);
506 assert(fetch
->state
== DIGEST_READ_REPLY
);
508 if (peerDigestFetchedEnough(fetch
, buf
, size
, "peerDigestFetchReply"))
511 if ((hdr_size
= headersEnd(buf
, size
))) {
512 const auto &reply
= fetch
->entry
->mem().freshestReply();
513 const auto status
= reply
.sline
.status();
514 assert(status
!= Http::scNone
);
515 debugs(72, 3, "peerDigestFetchReply: " << pd
->host
<< " status: " << status
<<
516 ", expires: " << (long int) reply
.expires
<< " (" << std::showpos
<<
517 (int) (reply
.expires
- squid_curtime
) << ")");
519 /* this "if" is based on clientHandleIMSReply() */
521 if (status
== Http::scNotModified
) {
522 /* our old entry is fine */
523 assert(fetch
->old_entry
);
525 if (!fetch
->old_entry
->mem_obj
->request
)
526 fetch
->old_entry
->mem_obj
->request
= fetch
->entry
->mem_obj
->request
;
528 assert(fetch
->old_entry
->mem_obj
->request
);
530 Store::Root().updateOnNotModified(fetch
->old_entry
, *fetch
->entry
);
532 /* get rid of 304 reply */
533 storeUnregister(fetch
->sc
, fetch
->entry
, fetch
);
535 fetch
->entry
->unlock("peerDigestFetchReply 304");
// From here on, fetch->entry refers to the (still locked) old entry.
537 fetch
->entry
= fetch
->old_entry
;
539 fetch
->old_entry
= nullptr;
541 /* preserve request -- we need its size to update counters */
542 /* requestUnlink(r); */
543 /* fetch->entry->mem_obj->request = NULL; */
544 } else if (status
== Http::scOkay
) {
545 /* get rid of old entry if any */
547 if (fetch
->old_entry
) {
548 debugs(72, 3, "peerDigestFetchReply: got new digest, releasing old one");
549 storeUnregister(fetch
->old_sc
, fetch
->old_entry
, fetch
);
550 fetch
->old_entry
->releaseRequest();
551 fetch
->old_entry
->unlock("peerDigestFetchReply 200");
552 fetch
->old_entry
= nullptr;
555 /* some kind of a bug */
556 peerDigestFetchAbort(fetch
, buf
, reply
.sline
.reason());
557 return -1; /* XXX -1 will abort stuff in ReadReply! */
560 /* must have a ready-to-use store entry if we got here */
561 /* can we stay with the old in-memory digest? */
562 if (status
== Http::scNotModified
&& fetch
->pd
->cd
) {
563 peerDigestFetchStop(fetch
, buf
, "Not modified");
564 fetch
->state
= DIGEST_READ_DONE
;
566 fetch
->state
= DIGEST_READ_HEADERS
;
569 /* need more data, do we have space? */
571 if (size
>= SM_PAGE_SIZE
)
572 peerDigestFetchAbort(fetch
, buf
, "reply header too big");
575 /* We don't want to actually ack that we've handled anything,
576 * otherwise SwapInHeaders() won't get the reply line .. */
// DIGEST_READ_HEADERS state handler: once full headers are buffered, verify
// the cached reply status is 200 (abort otherwise), advance to
// DIGEST_READ_CBLOCK, and consume hdr_size bytes. Returns 0 to request more
// data; aborts if a full header cannot fit in SM_PAGE_SIZE.
580 /* fetch headers from disk, pass on to SwapInCBlock */
582 peerDigestSwapInHeaders(void *data
, char *buf
, ssize_t size
)
584 DigestFetchState
*fetch
= (DigestFetchState
*)data
;
587 assert(fetch
->state
== DIGEST_READ_HEADERS
);
589 if (peerDigestFetchedEnough(fetch
, buf
, size
, "peerDigestSwapInHeaders"))
592 assert(!fetch
->offset
);
594 if ((hdr_size
= headersEnd(buf
, size
))) {
595 const auto &reply
= fetch
->entry
->mem().freshestReply();
596 const auto status
= reply
.sline
.status();
597 assert(status
!= Http::scNone
);
599 if (status
!= Http::scOkay
) {
600 debugs(72, DBG_IMPORTANT
, "peerDigestSwapInHeaders: " << fetch
->pd
->host
<<
601 " status " << status
<< " got cached!");
603 peerDigestFetchAbort(fetch
, buf
, "internal status error");
607 fetch
->state
= DIGEST_READ_CBLOCK
;
608 return hdr_size
; /* Say how much data we read */
611 /* need more data, do we have space? */
612 if (size
>= SM_PAGE_SIZE
) {
613 peerDigestFetchAbort(fetch
, buf
, "stored header too big");
617 return 0; /* We need to read more to parse .. */
// DIGEST_READ_CBLOCK state handler: once a full StoreDigestCBlock is
// buffered, parse/validate it via peerDigestSetCBlock(); on success advance
// to DIGEST_READ_MASK and consume StoreDigestCBlockSize bytes, otherwise
// abort. Returns 0 while more data is needed.
621 peerDigestSwapInCBlock(void *data
, char *buf
, ssize_t size
)
623 DigestFetchState
*fetch
= (DigestFetchState
*)data
;
625 assert(fetch
->state
== DIGEST_READ_CBLOCK
);
627 if (peerDigestFetchedEnough(fetch
, buf
, size
, "peerDigestSwapInCBlock"))
630 if (size
>= (ssize_t
)StoreDigestCBlockSize
) {
631 PeerDigest
*pd
= fetch
->pd
;
634 assert(fetch
->entry
->mem_obj
);
636 if (peerDigestSetCBlock(pd
, buf
)) {
637 /* XXX: soon we will have variable header size */
638 /* switch to CD buffer and fetch digest guts */
640 assert(pd
->cd
->mask
);
641 fetch
->state
= DIGEST_READ_MASK
;
642 return StoreDigestCBlockSize
;
644 peerDigestFetchAbort(fetch
, buf
, "invalid digest cblock");
649 /* need more data, do we have space? */
650 if (size
>= SM_PAGE_SIZE
) {
651 peerDigestFetchAbort(fetch
, buf
, "digest cblock too big");
655 return 0; /* We need more data */
// DIGEST_READ_MASK state handler: copy the received bytes into the digest
// bitmask at fetch->mask_offset; when the whole mask has arrived, run the
// final fetched-enough check (size 0 => success path) and return -1 to end
// the transfer. NOTE(review): the declaration initializing `pd` is missing
// from this view; code left byte-identical.
659 peerDigestSwapInMask(void *data
, char *buf
, ssize_t size
)
661 DigestFetchState
*fetch
= (DigestFetchState
*)data
;
665 assert(pd
->cd
&& pd
->cd
->mask
);
668 * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore!
669 * we need to do the copy ourselves!
671 memcpy(pd
->cd
->mask
+ fetch
->mask_offset
, buf
, size
);
673 /* NOTE! buf points to the middle of pd->cd->mask! */
675 if (peerDigestFetchedEnough(fetch
, nullptr, size
, "peerDigestSwapInMask"))
678 fetch
->mask_offset
+= size
;
680 if (fetch
->mask_offset
>= pd
->cd
->mask_size
) {
681 debugs(72, 2, "peerDigestSwapInMask: Done! Got " <<
682 fetch
->mask_offset
<< ", expected " << pd
->cd
->mask_size
);
683 assert(fetch
->mask_offset
== pd
->cd
->mask_size
);
684 assert(peerDigestFetchedEnough(fetch
, nullptr, 0, "peerDigestSwapInMask"));
685 return -1; /* XXX! */
688 /* We always read everything, so return size */
// Shared termination check for every parse step: decide whether the fetch
// should end now and why. Distinguishes error reasons (peer/pd gone, swap
// failure/abort, useless or truncated digest — '?!' marks "should not
// happen") from the success reason. When a reason is found, finishes the
// request via peerDigestReqFinish(); returns non-zero iff the fetch is over.
693 peerDigestFetchedEnough(DigestFetchState
* fetch
, char *buf
, ssize_t size
, const char *step_name
)
695 static const SBuf
hostUnknown("<unknown>"); // peer host (if any)
696 SBuf host
= hostUnknown
;
698 PeerDigest
*pd
= nullptr;
699 const char *reason
= nullptr; /* reason for completion */
700 const char *no_bug
= nullptr; /* successful completion if set */
// Capture cbdata validity up front, before any call below can invalidate it.
701 const int pdcb_valid
= cbdataReferenceValid(fetch
->pd
);
702 const int pcb_valid
= cbdataReferenceValid(fetch
->pd
->peer
);
704 /* test possible exiting conditions (the same for most steps!)
705 * cases marked with '?!' should not happen */
708 if (!(pd
= fetch
->pd
))
709 reason
= "peer digest disappeared?!";
714 debugs(72, 6, step_name
<< ": peer " << host
<< ", offset: " <<
715 fetch
->offset
<< " size: " << size
<< ".");
717 /* continue checking (with pd and host known and valid) */
720 if (!cbdataReferenceValid(pd
->peer
))
721 reason
= "peer disappeared";
723 reason
= "swap failure";
724 else if (!fetch
->entry
)
725 reason
= "swap aborted?!";
726 else if (EBIT_TEST(fetch
->entry
->flags
, ENTRY_ABORTED
))
727 reason
= "swap aborted";
730 /* continue checking (maybe-successful eof case) */
731 if (!reason
&& !size
) {
733 reason
= "null digest?!";
734 else if (fetch
->mask_offset
!= pd
->cd
->mask_size
)
735 reason
= "premature end of digest?!";
736 else if (!peerDigestUseful(pd
))
737 reason
= "useless digest";
739 reason
= no_bug
= "success";
742 /* finish if we have a reason */
// '?!' reasons are unexpected bugs, logged at level 1; others at level 3.
744 const int level
= strstr(reason
, "?!") ? 1 : 3;
745 debugs(72, level
, "" << step_name
<< ": peer " << host
<< ", exiting after '" << reason
<< "'");
746 peerDigestReqFinish(fetch
, buf
,
747 1, pdcb_valid
, pcb_valid
, reason
, !no_bug
);
750 assert(pdcb_valid
&& pcb_valid
);
753 return reason
!= nullptr;
// Successful early termination (e.g. 304 reuse of the old digest): finish
// the request with all cbdata flags valid and err = 0.
756 /* call this when all callback data is valid and fetch must be stopped but
757 * no error has occurred (e.g. we received 304 reply and reuse old digest) */
759 peerDigestFetchStop(DigestFetchState
* fetch
, char *buf
, const char *reason
)
762 debugs(72, 2, "peerDigestFetchStop: peer " << fetch
->pd
->host
<< ", reason: " << reason
);
763 peerDigestReqFinish(fetch
, buf
, 1, 1, 1, reason
, 0);
// Error termination: same as peerDigestFetchStop but with err = 1, which
// triggers the retry-backoff path in peerDigestReqFinish.
766 /* call this when all callback data is valid but something bad happened */
768 peerDigestFetchAbort(DigestFetchState
* fetch
, char *buf
, const char *reason
)
771 debugs(72, 2, "peerDigestFetchAbort: peer " << fetch
->pd
->host
<< ", reason: " << reason
);
772 peerDigestReqFinish(fetch
, buf
, 1, 1, 1, reason
, 1);
// Central completion routine: clear the requested flag, record the result
// string, schedule the next check (with backoff on error, Expires-based delay
// on success), then — in a deliberate order — update fetch stats, finalize
// the PeerDigest (peerDigestPDFinish), and free the fetch state
// (peerDigestFetchFinish). The *_valid flags tell it which cbdata objects
// are still safe to touch.
775 /* complete the digest transfer, update stats, unlock/release everything */
777 peerDigestReqFinish(DigestFetchState
* fetch
, char * /* buf */,
778 int fcb_valid
, int pdcb_valid
, int pcb_valid
,
779 const char *reason
, int err
)
783 /* must go before peerDigestPDFinish */
786 fetch
->pd
->flags
.requested
= false;
787 fetch
->pd
->req_result
= reason
;
790 /* schedule next check if peer is still out there */
792 PeerDigest
*pd
= fetch
->pd
;
// On error: exponential backoff before retrying.
795 pd
->times
.retry_delay
= peerDigestIncDelay(pd
);
796 peerDigestSetCheck(pd
, pd
->times
.retry_delay
);
// On success: reset backoff and check again near the entry's expiry.
798 pd
->times
.retry_delay
= 0;
799 peerDigestSetCheck(pd
, peerDigestNewDelay(fetch
->entry
));
803 /* note: order is significant */
805 peerDigestFetchSetStats(fetch
);
808 peerDigestPDFinish(fetch
, pcb_valid
, err
);
811 peerDigestFetchFinish(fetch
, err
);
// Finalize the PeerDigest after a fetch: copy per-fetch traffic stats into
// the digest's cumulative stats, mark the digest usable or unusable, handle
// a vanished peer (peerDigestNotePeerGone), and release the fetch's cbdata
// reference on the PeerDigest (after which ~DigestFetchState expects pd to
// be null).
814 /* destroys digest if peer disappeared
815 * must be called only when fetch and pd cbdata are valid */
817 peerDigestPDFinish(DigestFetchState
* fetch
, int pcb_valid
, int err
)
819 PeerDigest
*pd
= fetch
->pd
;
820 const auto host
= pd
->host
;
821 pd
->times
.received
= squid_curtime
;
822 pd
->times
.req_delay
= fetch
->resp_time
;
823 pd
->stats
.sent
.kbytes
+= fetch
->sent
.bytes
;
824 pd
->stats
.recv
.kbytes
+= fetch
->recv
.bytes
;
825 pd
->stats
.sent
.msgs
+= fetch
->sent
.msg
;
826 pd
->stats
.recv
.msgs
+= fetch
->recv
.msg
;
// Error path: digest disabled ("temporary" only if the peer is still alive).
829 debugs(72, DBG_IMPORTANT
, "" << (pcb_valid
? "temporary " : "" ) << "disabling (" << pd
->req_result
<< ") digest from " << host
);
834 pd
->flags
.usable
= false;
837 peerDigestNotePeerGone(pd
);
// Success path: digest is now usable.
841 pd
->flags
.usable
= true;
843 /* XXX: ugly condition, but how? */
// STORE_OK here means the entry was already complete, i.e. the old digest
// was reused (304); otherwise a fresh digest was received.
845 if (fetch
->entry
->store_status
== STORE_OK
)
846 debugs(72, 2, "re-used old digest from " << host
);
848 debugs(72, 2, "received valid digest from " << host
);
851 cbdataReferenceDone(fetch
->pd
);
// Release the fetch state: drop any leftover old entry (unregister, release,
// unlock) and fold the per-fetch traffic counters into the global cache
// digest statistics.
854 /* free fetch state structures
855 * must be called only when fetch cbdata is valid */
857 peerDigestFetchFinish(DigestFetchState
* fetch
, int /* err */)
859 assert(fetch
->entry
&& fetch
->request
);
861 if (fetch
->old_entry
) {
862 debugs(72, 3, "peerDigestFetchFinish: deleting old entry");
863 storeUnregister(fetch
->old_sc
, fetch
->old_entry
, fetch
);
864 fetch
->old_entry
->releaseRequest();
865 fetch
->old_entry
->unlock("peerDigestFetchFinish old");
866 fetch
->old_entry
= nullptr;
869 /* update global stats */
870 statCounter
.cd
.kbytes_sent
+= fetch
->sent
.bytes
;
871 statCounter
.cd
.kbytes_recv
+= fetch
->recv
.bytes
;
872 statCounter
.cd
.msgs_sent
+= fetch
->sent
.msg
;
873 statCounter
.cd
.msgs_recv
+= fetch
->recv
.msg
;
// Record per-fetch statistics after completion: bytes sent (request prefix
// length), bytes received (memobject size — admittedly approximate, see XXX
// notes), message counts, the reply's expiry, and the response time.
878 /* calculate fetch stats after completion */
880 peerDigestFetchSetStats(DigestFetchState
* fetch
)
883 assert(fetch
->entry
&& fetch
->request
);
885 mem
= fetch
->entry
->mem_obj
;
888 /* XXX: outgoing numbers are not precise */
889 /* XXX: we must distinguish between 304 hits and misses here */
890 fetch
->sent
.bytes
= fetch
->request
->prefixLen();
891 /* XXX: this is slightly wrong: we don't KNOW that the entire memobject
892 * was fetched. We only know how big it is
894 fetch
->recv
.bytes
= mem
->size();
895 fetch
->sent
.msg
= fetch
->recv
.msg
= 1;
896 fetch
->expires
= fetch
->entry
->expires
;
897 fetch
->resp_time
= squid_curtime
- fetch
->start_time
;
899 debugs(72, 3, "peerDigestFetchFinish: recv " << fetch
->recv
.bytes
<<
900 " bytes in " << (int) fetch
->resp_time
<< " secs");
902 debugs(72, 3, "peerDigestFetchFinish: expires: " <<
903 (long int) fetch
->expires
<< " (" << std::showpos
<<
904 (int) (fetch
->expires
- squid_curtime
) << "), lmt: " <<
905 std::noshowpos
<< (long int) fetch
->entry
->lastModified() << " (" <<
906 std::showpos
<< (int) (fetch
->entry
->lastModified() - squid_curtime
) <<
// Parse and validate the wire-format digest control block (cblock):
// byte-swap the network-order fields, check protocol version compatibility
// both ways against CacheDigestVer, sanity-check field values and the mask
// size against CacheDigest::CalcMaskSize, require the supported hash
// function count, then (re)allocate pd->cd with the advertised geometry and
// seed its counters. Returns non-zero on success (caller: SwapInCBlock).
912 peerDigestSetCBlock(PeerDigest
* pd
, const char *buf
)
914 StoreDigestCBlock cblock
;
916 const auto host
= pd
->host
;
918 memcpy(&cblock
, buf
, sizeof(cblock
));
919 /* network -> host conversions */
920 cblock
.ver
.current
= ntohs(cblock
.ver
.current
);
921 cblock
.ver
.required
= ntohs(cblock
.ver
.required
);
922 cblock
.capacity
= ntohl(cblock
.capacity
);
923 cblock
.count
= ntohl(cblock
.count
);
924 cblock
.del_count
= ntohl(cblock
.del_count
);
925 cblock
.mask_size
= ntohl(cblock
.mask_size
);
926 debugs(72, 2, "got digest cblock from " << host
<< "; ver: " <<
927 (int) cblock
.ver
.current
<< " (req: " << (int) cblock
.ver
.required
<<
930 debugs(72, 2, "\t size: " <<
931 cblock
.mask_size
<< " bytes, e-cnt: " <<
932 cblock
.count
<< ", e-util: " <<
933 xpercentInt(cblock
.count
, cblock
.capacity
) << "%" );
934 /* check version requirements (both ways) */
936 if (cblock
.ver
.required
> CacheDigestVer
.current
) {
937 debugs(72, DBG_IMPORTANT
, "" << host
<< " digest requires version " <<
938 cblock
.ver
.required
<< "; have: " << CacheDigestVer
.current
);
943 if (cblock
.ver
.current
< CacheDigestVer
.required
) {
944 debugs(72, DBG_IMPORTANT
, "" << host
<< " digest is version " <<
945 cblock
.ver
.current
<< "; we require: " <<
946 CacheDigestVer
.required
);
951 /* check consistency */
952 if (cblock
.ver
.required
> cblock
.ver
.current
||
953 cblock
.mask_size
<= 0 || cblock
.capacity
<= 0 ||
954 cblock
.bits_per_entry
<= 0 || cblock
.hash_func_count
<= 0) {
955 debugs(72, DBG_CRITICAL
, "" << host
<< " digest cblock is corrupted.");
959 /* check consistency further */
960 if ((size_t)cblock
.mask_size
!= CacheDigest::CalcMaskSize(cblock
.capacity
, cblock
.bits_per_entry
)) {
961 debugs(72, DBG_CRITICAL
, host
<< " digest cblock is corrupted " <<
962 "(mask size mismatch: " << cblock
.mask_size
<< " ? " <<
963 CacheDigest::CalcMaskSize(cblock
.capacity
, cblock
.bits_per_entry
)
968 /* there are some things we cannot do yet */
969 if (cblock
.hash_func_count
!= CacheDigestHashFuncCount
) {
970 debugs(72, DBG_CRITICAL
, "ERROR: " << host
<< " digest: unsupported #hash functions: " <<
971 cblock
.hash_func_count
<< " ? " << CacheDigestHashFuncCount
<< ".");
976 * no cblock bugs below this point
978 /* check size changes */
979 if (pd
->cd
&& cblock
.mask_size
!= (ssize_t
)pd
->cd
->mask_size
) {
980 debugs(72, 2, host
<< " digest changed size: " << cblock
.mask_size
<<
981 " -> " << pd
->cd
->mask_size
);
982 freed_size
= pd
->cd
->mask_size
;
988 debugs(72, 2, "creating " << host
<< " digest; size: " << cblock
.mask_size
<< " (" <<
989 std::showpos
<< (int) (cblock
.mask_size
- freed_size
) << ") bytes");
990 pd
->cd
= new CacheDigest(cblock
.capacity
, cblock
.bits_per_entry
);
// Track the net memory delta for the global digest memory statistic.
992 if (cblock
.mask_size
>= freed_size
)
993 statCounter
.cd
.memory
+= (cblock
.mask_size
- freed_size
);
997 /* these assignments leave us in an inconsistent state until we finish reading the digest */
998 pd
->cd
->count
= cblock
.count
;
999 pd
->cd
->del_count
= cblock
.del_count
;
// Heuristic usefulness check: a digest with more than 65% of its mask bits
// set produces too many false hits to be worth using; warn in that case.
1004 peerDigestUseful(const PeerDigest
* pd
)
1006 /* TODO: we should calculate the prob of a false hit instead of bit util */
1007 const auto bit_util
= pd
->cd
->usedMaskPercent();
1009 if (bit_util
> 65.0) {
1010 debugs(72, DBG_CRITICAL
, "WARNING: " << pd
->host
<<
1011 " peer digest has too many bits on (" << bit_util
<< "%).");
// Clamp obviously bogus time differences (larger than half the current epoch
// time) to 0 for the stats report below.
1019 saneDiff(time_t diff
)
1021 return abs((int) diff
) > squid_curtime
/ 2 ? 0 : diff
;
// Cache-manager report for one peer digest: event timestamps (via the
// appendTime macro, with saneDiff-clamped deltas), state flags, last request
// result, traffic totals, and the in-memory digest structure (if present).
1025 peerDigestStatsReport(const PeerDigest
* pd
, StoreEntry
* e
)
1027 #define f2s(flag) (pd->flags.flag ? "yes" : "no")
1028 #define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
1029 ""#tm, (long int)pd->times.tm, \
1030 saneDiff(pd->times.tm - squid_curtime), \
1031 saneDiff(pd->times.tm - pd->times.initialized))
1035 auto host
= pd
->host
;
1036 storeAppendPrintf(e
, "\npeer digest from " SQUIDSBUFPH
"\n", SQUIDSBUFPRINT(host
));
1038 cacheDigestGuessStatsReport(&pd
->stats
.guess
, e
, host
);
1040 storeAppendPrintf(e
, "\nevent\t timestamp\t secs from now\t secs from init\n");
1041 appendTime(initialized
);
1043 appendTime(requested
);
1044 appendTime(received
);
1045 appendTime(next_check
);
1047 storeAppendPrintf(e
, "peer digest state:\n");
1048 storeAppendPrintf(e
, "\tneeded: %3s, usable: %3s, requested: %3s\n",
1049 f2s(needed
), f2s(usable
), f2s(requested
));
1050 storeAppendPrintf(e
, "\n\tlast retry delay: %d secs\n",
1051 (int) pd
->times
.retry_delay
);
1052 storeAppendPrintf(e
, "\tlast request response time: %d secs\n",
1053 (int) pd
->times
.req_delay
);
1054 storeAppendPrintf(e
, "\tlast request result: %s\n",
1055 pd
->req_result
? pd
->req_result
: "(none)");
1057 storeAppendPrintf(e
, "\npeer digest traffic:\n");
1058 storeAppendPrintf(e
, "\trequests sent: %d, volume: %d KB\n",
1059 pd
->stats
.sent
.msgs
, (int) pd
->stats
.sent
.kbytes
.kb
);
1060 storeAppendPrintf(e
, "\treplies recv: %d, volume: %d KB\n",
1061 pd
->stats
.recv
.msgs
, (int) pd
->stats
.recv
.kbytes
.kb
);
1063 storeAppendPrintf(e
, "\npeer digest structure:\n");
1066 cacheDigestReport(pd
->cd
, host
, e
);
1068 storeAppendPrintf(e
, "\tno in-memory copy\n");