
/*
 * DEBUG: section 72    Peer Digest Routines
 * AUTHOR: Alex Rousskov
 *
 * SQUID Web Proxy Cache          http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details. Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details. Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details. Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#if USE_CACHE_DIGESTS
#include "CacheDigest.h"
#include "CachePeer.h"
#include "event.h"
#include "forward.h"
#include "globals.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "internal.h"
#include "MemObject.h"
#include "mime_header.h"
#include "neighbors.h"
#include "PeerDigest.h"
#include "SquidTime.h"
#include "Store.h"
#include "store_key_md5.h"
#include "StoreClient.h"
#include "tools.h"

/* local types */

/* local prototypes */
static time_t peerDigestIncDelay(const PeerDigest * pd);
static time_t peerDigestNewDelay(const StoreEntry * e);
static void peerDigestSetCheck(PeerDigest * pd, time_t delay);
static void peerDigestClean(PeerDigest *);
static EVH peerDigestCheck;
static void peerDigestRequest(PeerDigest * pd);
static STCB peerDigestHandleReply;
static int peerDigestFetchReply(void *, char *, ssize_t);
int peerDigestSwapInHeaders(void *, char *, ssize_t);
int peerDigestSwapInCBlock(void *, char *, ssize_t);
int peerDigestSwapInMask(void *, char *, ssize_t);
static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err);
static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err);
static void peerDigestFetchFinish(DigestFetchState * fetch, int err);
static void peerDigestFetchSetStats(DigestFetchState * fetch);
static int peerDigestSetCBlock(PeerDigest * pd, const char *buf);
static int peerDigestUseful(const PeerDigest * pd);

/* local constants */
Version const CacheDigestVer = { 5, 3 };

#define StoreDigestCBlockSize sizeof(StoreDigestCBlock)

/* min interval for requesting digests from a given peer */
static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */
/* min interval for requesting digests (cumulative request stream) */
static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */
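/*
 * How the two gaps combine in peerDigestCheck(): a fresh request to a
 * given peer is scheduled no sooner than 5 minutes after the last
 * digest was received from it, and no two digest requests (to any
 * peers) are issued less than 1 minute apart.
 */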

/* local vars */

static time_t pd_last_req_time = 0; /* last call to Check */

/* initialize peer digest */
static void
peerDigestInit(PeerDigest * pd, CachePeer * p)
{
    assert(pd && p);

    memset(pd, 0, sizeof(*pd));
    /*
     * DPW 2007-04-12
     * Lock on to the peer here. The corresponding cbdataReferenceDone()
     * is in peerDigestDestroy().
     */
    pd->peer = cbdataReference(p);
    /* if the peer disappears, we will still know its name */
    pd->host = p->host;

    pd->times.initialized = squid_curtime;
}

static void
peerDigestClean(PeerDigest * pd)
{
    assert(pd);

    if (pd->cd)
        cacheDigestDestroy(pd->cd);

    pd->host.clean();
}

CBDATA_CLASS_INIT(PeerDigest);

void *
PeerDigest::operator new (size_t)
{
    CBDATA_INIT_TYPE(PeerDigest);
    PeerDigest *result = cbdataAlloc(PeerDigest);
    return result;
}

void
PeerDigest::operator delete (void *address)
{
    PeerDigest *t = static_cast<PeerDigest *>(address);
    cbdataFree(t);
}

/* allocate new peer digest, call Init, and lock everything */
PeerDigest *
peerDigestCreate(CachePeer * p)
{
    PeerDigest *pd;
    assert(p);

    pd = new PeerDigest;
    peerDigestInit(pd, p);

    /* XXX This does not look right, and the same thing again in the caller */
    return cbdataReference(pd);
}

/* call Clean and free/unlock everything */
static void
peerDigestDestroy(PeerDigest * pd)
{
    void *p;
    assert(pd);
    void * peerTmp = pd->peer;

    /*
     * DPW 2007-04-12
     * We locked the peer in peerDigestInit(), this is
     * where we unlock it. If the peer is still valid,
     * tell it that the digest is gone.
     */
    if (cbdataReferenceValidDone(peerTmp, &p))
        peerNoteDigestGone((CachePeer *)p);

    peerDigestClean(pd);

    delete pd;
}

/* called by peer to indicate that somebody actually needs this digest */
void
peerDigestNeeded(PeerDigest * pd)
{
    assert(pd);
    assert(!pd->flags.needed);
    assert(!pd->cd);

    pd->flags.needed = true;
    pd->times.needed = squid_curtime;
    peerDigestSetCheck(pd, 0); /* check asap */
}

/* currently we do not have a reason to disable without destroying */
#if FUTURE_CODE
/* disables peer for good */
static void
peerDigestDisable(PeerDigest * pd)
{
    debugs(72, 2, "peerDigestDisable: peer " << pd->host.buf() << " disabled for good");
    pd->times.disabled = squid_curtime;
    pd->times.next_check = -1; /* never */
    pd->flags.usable = 0;

    if (pd->cd) {
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    /* we do not destroy the pd itself to preserve its "history" and stats */
}

#endif

/* increment retry delay [after an unsuccessful attempt] */
static time_t
peerDigestIncDelay(const PeerDigest * pd)
{
    assert(pd);
    return pd->times.retry_delay > 0 ?
           2 * pd->times.retry_delay : /* exponential backoff */
           PeerDigestReqMinGap; /* minimal delay */
}
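/*
 * peerDigestIncDelay() failure sequence, for illustration: retry_delay
 * starts at zero, so the first failed fetch is retried after
 * PeerDigestReqMinGap (5 min); each further consecutive failure doubles
 * the delay (10, 20, 40 min, ...) until a fetch succeeds and
 * peerDigestReqFinish() resets retry_delay to zero.
 */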

/* artificially increases Expires: setting to avoid race conditions
 * returns the delay till that [increased] expiration time */
static time_t
peerDigestNewDelay(const StoreEntry * e)
{
    assert(e);

    if (e->expires > 0)
        return e->expires + PeerDigestReqMinGap - squid_curtime;

    return PeerDigestReqMinGap;
}
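/*
 * peerDigestNewDelay() worked example: if the freshly fetched digest
 * expires 600 seconds from now, the next check is scheduled 600 + 300 =
 * 900 seconds ahead, i.e. 5 minutes past the advertised expiration
 * time, to avoid re-requesting a digest the peer may not have rebuilt
 * yet.
 */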

/* registers next digest verification */
static void
peerDigestSetCheck(PeerDigest * pd, time_t delay)
{
    eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
    pd->times.next_check = squid_curtime + delay;
    debugs(72, 3, "peerDigestSetCheck: will check peer " << pd->host << " in " << delay << " secs");
}

/*
 * called when the peer is about to disappear or has already disappeared
 */
void
peerDigestNotePeerGone(PeerDigest * pd)
{
    if (pd->flags.requested) {
        debugs(72, 2, "peerDigest: peer " << pd->host << " gone, will destroy after fetch.");
        /* do nothing now, the fetching chain will notice and take action */
    } else {
        debugs(72, 2, "peerDigest: peer " << pd->host << " is gone, destroying now.");
        peerDigestDestroy(pd);
    }
}

/* callback for eventAdd() (with peer digest locked)
 * request new digest if our copy is too old or if we lack one;
 * schedule next check otherwise */
static void
peerDigestCheck(void *data)
{
    PeerDigest *pd = (PeerDigest *)data;
    time_t req_time;

    assert(!pd->flags.requested);

    pd->times.next_check = 0; /* unknown */

    if (!cbdataReferenceValid(pd->peer)) {
        peerDigestNotePeerGone(pd);
        return;
    }

    debugs(72, 3, "peerDigestCheck: peer " << pd->peer->host << ":" << pd->peer->http_port);
    debugs(72, 3, "peerDigestCheck: time: " << squid_curtime <<
           ", last received: " << (long int) pd->times.received << " (" <<
           std::showpos << (int) (squid_curtime - pd->times.received) << ")");

    /* decide when we should send the request:
     * request now unless too close to other requests */
    req_time = squid_curtime;

    /* per-peer limit */

    if (req_time - pd->times.received < PeerDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close peer requests (" <<
               (int) (req_time - pd->times.received) << " < " <<
               (int) PeerDigestReqMinGap << " secs).");

        req_time = pd->times.received + PeerDigestReqMinGap;
    }

    /* global limit */
    if (req_time - pd_last_req_time < GlobDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close requests (" <<
               (int) (req_time - pd_last_req_time) << " < " <<
               (int) GlobDigestReqMinGap << " secs).");

        req_time = pd_last_req_time + GlobDigestReqMinGap;
    }

    if (req_time <= squid_curtime)
        peerDigestRequest(pd); /* will set pd->flags.requested */
    else
        peerDigestSetCheck(pd, req_time - squid_curtime);
}
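/*
 * Example of the clamping above: if the current digest was received
 * only 2 minutes ago, req_time is pushed forward to received + 5
 * minutes, so instead of requesting immediately we schedule another
 * peerDigestCheck() roughly 3 minutes from now.
 */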

CBDATA_TYPE(DigestFetchState);

/* ask store for a digest */
static void
peerDigestRequest(PeerDigest * pd)
{
    CachePeer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url;
    const cache_key *key;
    HttpRequest *req;
    DigestFetchState *fetch = NULL;
    StoreIOBuffer tempBuffer;

    pd->req_result = NULL;
    pd->flags.requested = true;

    /* compute future request components */

    if (p->digest_url)
        url = xstrdup(p->digest_url);
    else
        url = internalRemoteUri(p->host, p->http_port,
                                "/squid-internal-periodic/", StoreDigestFileName);

    req = HttpRequest::CreateFromUrl(url);

    assert(req);

    key = storeKeyPublicByRequest(req);

    debugs(72, 2, "peerDigestRequest: " << url << " key: " << storeKeyText(key));

    /* add custom headers */
    assert(!req->header.len);

    req->header.putStr(HDR_ACCEPT, StoreDigestMimeStr);

    req->header.putStr(HDR_ACCEPT, "text/html");

    if (p->login)
        xstrncpy(req->login, p->login, MAX_LOGIN_SZ);

    /* create fetch state structure */
    CBDATA_INIT_TYPE(DigestFetchState);

    fetch = cbdataAlloc(DigestFetchState);

    fetch->request = req;
    HTTPMSGLOCK(fetch->request);

    fetch->pd = cbdataReference(pd);

    fetch->offset = 0;

    fetch->state = DIGEST_READ_REPLY;

    /* update timestamps */
    fetch->start_time = squid_curtime;

    pd->times.requested = squid_curtime;

    pd_last_req_time = squid_curtime;

    req->flags.cachable = true;

    /* the rest is based on clientProcessExpired() */
    req->flags.refresh = true;

    old_e = fetch->old_entry = Store::Root().get(key);

    if (old_e) {
        debugs(72, 5, "peerDigestRequest: found old entry");

        old_e->lock();
        old_e->createMemObject(url, url);

        fetch->old_sc = storeClientListAdd(old_e, fetch);
    }

    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    fetch->sc = storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */

    if (old_e)
        e->lastmod = old_e->lastmod;

    /* push towards peer cache */
    debugs(72, 3, "peerDigestRequest: forwarding to fwdStart...");

    FwdState::fwdStart(Comm::ConnectionPointer(), e, req);

    tempBuffer.offset = 0;

    tempBuffer.length = SM_PAGE_SIZE;

    tempBuffer.data = fetch->buf;

    storeClientCopy(fetch->sc, e, tempBuffer,
                    peerDigestHandleReply, fetch);
}
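/*
 * Note: unless the CachePeer has an explicit digest_url configured, the
 * request built above targets the peer's well-known digest location,
 * typically something like
 *     http://peer.example.com:3128/squid-internal-periodic/store_digest
 * (StoreDigestFileName supplies the last path component; the exact name
 * can vary between configurations).
 */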

/* Handle the data copying .. */

/*
 * This routine handles the copied data and then dispatches it to a
 * number of sub-functions depending upon the copy state. It also
 * tracks the buffer offset and the amount "seen" so far, so the rest
 * of the code does not need to be rewritten around this loop.
 */
static void
peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    int retsize = -1;
    digest_read_state_t prevstate;
    int newsize;

    assert(fetch->pd && receivedData.data);
    /* The existing code assumes that the received pointer is
     * where we asked the data to be put
     */
    assert(fetch->buf + fetch->bufofs == receivedData.data);

    /* Update the buffer size */
    fetch->bufofs += receivedData.length;

    assert(fetch->bufofs <= SM_PAGE_SIZE);

    /* If we've fetched enough, return */

    if (peerDigestFetchedEnough(fetch, fetch->buf, fetch->bufofs, "peerDigestHandleReply"))
        return;

    /* Call the right function based on the state */
    /* (Those functions will update the state if needed) */

    /* Give us a temporary reference. Some of the calls we make may
     * try to destroy the fetch structure, and we like to know if they
     * do
     */
    fetch = cbdataReference(fetch);

    /* Repeat this loop until we're out of data OR the state changes */
    /* (So keep going if the state has changed and we still have data) */
    do {
        prevstate = fetch->state;

        switch (fetch->state) {

        case DIGEST_READ_REPLY:
            retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_HEADERS:
            retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_CBLOCK:
            retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_MASK:
            retsize = peerDigestSwapInMask(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_NONE:
            break;

        case DIGEST_READ_DONE:
            goto finish;
            break;

        default:
            fatal("Bad digest transfer mode!\n");
        }

        if (retsize < 0)
            goto finish;

        /*
         * The returned size indicates how much of the buffer was read -
         * so move the remainder of the buffer to the beginning
         * and update the bufofs / bufsize
         */
        newsize = fetch->bufofs - retsize;

        memmove(fetch->buf, fetch->buf + retsize, fetch->bufofs - newsize);

        fetch->bufofs = newsize;

    } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);

    /* Update the copy offset */
    fetch->offset += receivedData.length;

    /* Schedule another copy */
    if (cbdataReferenceValid(fetch)) {
        StoreIOBuffer tempBuffer;
        tempBuffer.offset = fetch->offset;
        tempBuffer.length = SM_PAGE_SIZE - fetch->bufofs;
        tempBuffer.data = fetch->buf + fetch->bufofs;
        storeClientCopy(fetch->sc, fetch->entry, tempBuffer,
                        peerDigestHandleReply, fetch);
    }

finish:
    /* Get rid of our reference, we've finished with it for now */
    cbdataReferenceDone(fetch);
}
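/*
 * Typical state progression driven by the loop above:
 *     DIGEST_READ_REPLY   -> parse the HTTP status line and headers
 *     DIGEST_READ_HEADERS -> validate the (possibly swapped-in) headers
 *     DIGEST_READ_CBLOCK  -> parse the fixed-size digest control block
 *     DIGEST_READ_MASK    -> copy the bit mask into pd->cd->mask
 *     DIGEST_READ_DONE    -> fetch finished
 * Each step returns the number of bytes it consumed, and the consumed
 * prefix is shifted out of fetch->buf before the next iteration.
 */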

/* wait for full http headers to be received then parse them */
/*
 * This routine handles parsing the reply line.
 * If the reply line indicates an OK, the same data is thrown
 * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
 * we simply stop parsing.
 */
static int
peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd = fetch->pd;
    size_t hdr_size;
    assert(pd && buf);
    assert(!fetch->offset);

    assert(fetch->state == DIGEST_READ_REPLY);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
        return -1;

    if ((hdr_size = headersEnd(buf, size))) {
        http_status status;
        HttpReply const *reply = fetch->entry->getReply();
        assert(reply);
        assert (reply->sline.status != 0);
        status = reply->sline.status;
        debugs(72, 3, "peerDigestFetchReply: " << pd->host << " status: " << status <<
               ", expires: " << (long int) reply->expires << " (" << std::showpos <<
               (int) (reply->expires - squid_curtime) << ")");

        /* this "if" is based on clientHandleIMSReply() */

        if (status == HTTP_NOT_MODIFIED) {
            /* our old entry is fine */
            assert(fetch->old_entry);

            if (!fetch->old_entry->mem_obj->request) {
                fetch->old_entry->mem_obj->request = fetch->entry->mem_obj->request;
                HTTPMSGLOCK(fetch->old_entry->mem_obj->request);
            }

            assert(fetch->old_entry->mem_obj->request);

            HttpReply *old_rep = (HttpReply *) fetch->old_entry->getReply();

            old_rep->updateOnNotModified(reply);

            fetch->old_entry->timestampsSet();

            /* get rid of 304 reply */
            storeUnregister(fetch->sc, fetch->entry, fetch);

            fetch->entry->unlock();

            fetch->entry = fetch->old_entry;

            fetch->old_entry = NULL;

            /* preserve request -- we need its size to update counters */
            /* requestUnlink(r); */
            /* fetch->entry->mem_obj->request = NULL; */
        } else if (status == HTTP_OK) {
            /* get rid of old entry if any */

            if (fetch->old_entry) {
                debugs(72, 3, "peerDigestFetchReply: got new digest, releasing old one");
                storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
                fetch->old_entry->releaseRequest();
                fetch->old_entry->unlock();
                fetch->old_entry = NULL;
            }
        } else {
            /* some kind of a bug */
            peerDigestFetchAbort(fetch, buf, httpStatusLineReason(&reply->sline));
            return -1; /* XXX -1 will abort stuff in ReadReply! */
        }

        /* must have a ready-to-use store entry if we got here */
        /* can we stay with the old in-memory digest? */
        if (status == HTTP_NOT_MODIFIED && fetch->pd->cd) {
            peerDigestFetchStop(fetch, buf, "Not modified");
            fetch->state = DIGEST_READ_DONE;
        } else {
            fetch->state = DIGEST_READ_HEADERS;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "reply header too big");
    }

    /* We don't want to actually ack that we've handled anything,
     * otherwise SwapInHeaders() won't get the reply line .. */
    return 0;
}
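/*
 * In short, the 304 handling above switches the fetch back to the
 * still-valid old store entry; if an in-memory digest (pd->cd) also
 * exists, the fetch stops right away, otherwise we continue in
 * DIGEST_READ_HEADERS so the digest can be rebuilt from the cached
 * copy.
 */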

/* fetch headers from disk, pass on to SwapInCBlock */
int
peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    size_t hdr_size;

    assert(fetch->state == DIGEST_READ_HEADERS);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
        return -1;

    assert(!fetch->offset);

    if ((hdr_size = headersEnd(buf, size))) {
        assert(fetch->entry->getReply());
        assert (fetch->entry->getReply()->sline.status != 0);

        if (fetch->entry->getReply()->sline.status != HTTP_OK) {
            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
                   " status " << fetch->entry->getReply()->sline.status <<
                   " got cached!");

            peerDigestFetchAbort(fetch, buf, "internal status error");
            return -1;
        }

        fetch->state = DIGEST_READ_CBLOCK;
        return hdr_size; /* Say how much data we read */
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE) {
            peerDigestFetchAbort(fetch, buf, "stored header too big");
            return -1;
        } else {
            return 0; /* We need to read more to parse .. */
        }
    }

    fatal("peerDigestSwapInHeaders() - shouldn't get here!\n");
    return 0; /* keep gcc happy */
}

int
peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;

    assert(fetch->state == DIGEST_READ_CBLOCK);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock"))
        return -1;

    if (size >= (ssize_t)StoreDigestCBlockSize) {
        PeerDigest *pd = fetch->pd;

        assert(pd && fetch->entry->getReply());

        if (peerDigestSetCBlock(pd, buf)) {
            /* XXX: soon we will have variable header size */
            /* switch to CD buffer and fetch digest guts */
            buf = NULL;
            assert(pd->cd->mask);
            fetch->state = DIGEST_READ_MASK;
            return StoreDigestCBlockSize;
        } else {
            peerDigestFetchAbort(fetch, buf, "invalid digest cblock");
            return -1;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE) {
            peerDigestFetchAbort(fetch, buf, "digest cblock too big");
            return -1;
        } else {
            return 0; /* We need more data */
        }
    }

    fatal("peerDigestSwapInCBlock(): shouldn't get here!\n");
    return 0; /* keep gcc happy */
}
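/*
 * peerDigestSwapInCBlock() consumes exactly StoreDigestCBlockSize
 * bytes; anything following the control block in the buffer is already
 * part of the bit mask and is handled in the DIGEST_READ_MASK state
 * after peerDigestHandleReply() shifts the buffer.
 */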

int
peerDigestSwapInMask(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd;

    pd = fetch->pd;
    assert(pd->cd && pd->cd->mask);

    /*
     * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore!
     * we need to do the copy ourselves!
     */
    memcpy(pd->cd->mask + fetch->mask_offset, buf, size);

    /* NOTE! buf points to the middle of pd->cd->mask! */

    if (peerDigestFetchedEnough(fetch, NULL, size, "peerDigestSwapInMask"))
        return -1;

    fetch->mask_offset += size;

    if (fetch->mask_offset >= pd->cd->mask_size) {
        debugs(72, 2, "peerDigestSwapInMask: Done! Got " <<
               fetch->mask_offset << ", expected " << pd->cd->mask_size);
        assert(fetch->mask_offset == pd->cd->mask_size);
        assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask"));
        return -1; /* XXX! */
    } else {
        /* We always consume everything we are given, so report the full size */
        return size;
    }

    fatal("peerDigestSwapInMask(): shouldn't get here!\n");
    return 0; /* keep gcc happy */
}
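/*
 * The mask normally spans many storeClientCopy() callbacks: each call
 * appends at most SM_PAGE_SIZE bytes at pd->cd->mask + fetch->mask_offset
 * until mask_offset reaches pd->cd->mask_size, at which point
 * peerDigestFetchedEnough() declares the fetch complete.
 */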

static int
peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name)
{
    PeerDigest *pd = NULL;
    const char *host = "<unknown>"; /* peer host */
    const char *reason = NULL; /* reason for completion */
    const char *no_bug = NULL; /* successful completion if set */
    const int pdcb_valid = cbdataReferenceValid(fetch->pd);
    const int pcb_valid = cbdataReferenceValid(fetch->pd->peer);

    /* test possible exiting conditions (the same for most steps!)
     * cases marked with '?!' should not happen */

    if (!reason) {
        if (!(pd = fetch->pd))
            reason = "peer digest disappeared?!";

#if DONT /* WHY NOT? /HNO */

        else if (!cbdataReferenceValid(pd))
            reason = "invalidated peer digest?!";

#endif

        else
            host = pd->host.termedBuf();
    }

    debugs(72, 6, step_name << ": peer " << host << ", offset: " <<
           fetch->offset << " size: " << size << ".");

    /* continue checking (with pd and host known and valid) */

    if (!reason) {
        if (!cbdataReferenceValid(pd->peer))
            reason = "peer disappeared";
        else if (size < 0)
            reason = "swap failure";
        else if (!fetch->entry)
            reason = "swap aborted?!";
        else if (EBIT_TEST(fetch->entry->flags, ENTRY_ABORTED))
            reason = "swap aborted";
    }

    /* continue checking (maybe-successful eof case) */
    if (!reason && !size) {
        if (!pd->cd)
            reason = "null digest?!";
        else if (fetch->mask_offset != (int)pd->cd->mask_size)
            reason = "premature end of digest?!";
        else if (!peerDigestUseful(pd))
            reason = "useless digest";
        else
            reason = no_bug = "success";
    }

    /* finish if we have a reason */
    if (reason) {
        const int level = strstr(reason, "?!") ? 1 : 3;
        debugs(72, level, "" << step_name << ": peer " << host << ", exiting after '" << reason << "'");
        peerDigestReqFinish(fetch, buf,
                            1, pdcb_valid, pcb_valid, reason, !no_bug);
    } else {
        /* paranoid check */
        assert(pdcb_valid && pcb_valid);
    }

    return reason != NULL;
}

/* call this when all callback data is valid and fetch must be stopped but
 * no error has occurred (e.g. we received 304 reply and reuse old digest) */
static void
peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchStop: peer " << fetch->pd->host << ", reason: " << reason);
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 0);
}

/* call this when all callback data is valid but something bad happened */
static void
peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchAbort: peer " << fetch->pd->host << ", reason: " << reason);
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1);
}

/* complete the digest transfer, update stats, unlock/release everything */
static void
peerDigestReqFinish(DigestFetchState * fetch, char *buf,
                    int fcb_valid, int pdcb_valid, int pcb_valid,
                    const char *reason, int err)
{
    assert(reason);

    /* must go before peerDigestPDFinish */

    if (pdcb_valid) {
        fetch->pd->flags.requested = false;
        fetch->pd->req_result = reason;
    }

    /* schedule next check if peer is still out there */
    if (pcb_valid) {
        PeerDigest *pd = fetch->pd;

        if (err) {
            pd->times.retry_delay = peerDigestIncDelay(pd);
            peerDigestSetCheck(pd, pd->times.retry_delay);
        } else {
            pd->times.retry_delay = 0;
            peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry));
        }
    }

    /* note: order is significant */
    if (fcb_valid)
        peerDigestFetchSetStats(fetch);

    if (pdcb_valid)
        peerDigestPDFinish(fetch, pcb_valid, err);

    if (fcb_valid)
        peerDigestFetchFinish(fetch, err);
}

/* destroys digest if peer disappeared
 * must be called only when fetch and pd cbdata are valid */
static void
peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err)
{
    PeerDigest *pd = fetch->pd;
    const char *host = pd->host.termedBuf();

    pd->times.received = squid_curtime;
    pd->times.req_delay = fetch->resp_time;
    kb_incr(&pd->stats.sent.kbytes, (size_t) fetch->sent.bytes);
    kb_incr(&pd->stats.recv.kbytes, (size_t) fetch->recv.bytes);
    pd->stats.sent.msgs += fetch->sent.msg;
    pd->stats.recv.msgs += fetch->recv.msg;

    if (err) {
        debugs(72, DBG_IMPORTANT, "" << (pcb_valid ? "temporary " : "" ) << "disabling (" << pd->req_result << ") digest from " << host);

        if (pd->cd) {
            cacheDigestDestroy(pd->cd);
            pd->cd = NULL;
        }

        pd->flags.usable = false;

        if (!pcb_valid)
            peerDigestNotePeerGone(pd);
    } else {
        assert(pcb_valid);

        pd->flags.usable = true;

        /* XXX: ugly condition, but how? */

        if (fetch->entry->store_status == STORE_OK)
            debugs(72, 2, "re-used old digest from " << host);
        else
            debugs(72, 2, "received valid digest from " << host);
    }

    cbdataReferenceDone(fetch->pd);
}

/* free fetch state structures
 * must be called only when fetch cbdata is valid */
static void
peerDigestFetchFinish(DigestFetchState * fetch, int err)
{
    assert(fetch->entry && fetch->request);

    if (fetch->old_entry) {
        debugs(72, 3, "peerDigestFetchFinish: deleting old entry");
        storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
        fetch->old_entry->releaseRequest();
        fetch->old_entry->unlock();
        fetch->old_entry = NULL;
    }

    /* update global stats */
    kb_incr(&statCounter.cd.kbytes_sent, (size_t) fetch->sent.bytes);

    kb_incr(&statCounter.cd.kbytes_recv, (size_t) fetch->recv.bytes);

    statCounter.cd.msgs_sent += fetch->sent.msg;

    statCounter.cd.msgs_recv += fetch->recv.msg;

    /* unlock everything */
    storeUnregister(fetch->sc, fetch->entry, fetch);

    fetch->entry->unlock();

    HTTPMSGUNLOCK(fetch->request);

    fetch->entry = NULL;

    assert(fetch->pd == NULL);

    cbdataFree(fetch);
}

/* calculate fetch stats after completion */
static void
peerDigestFetchSetStats(DigestFetchState * fetch)
{
    MemObject *mem;
    assert(fetch->entry && fetch->request);

    mem = fetch->entry->mem_obj;
    assert(mem);

    /* XXX: outgoing numbers are not precise */
    /* XXX: we must distinguish between 304 hits and misses here */
    fetch->sent.bytes = fetch->request->prefixLen();
    /* XXX: this is slightly wrong: we don't KNOW that the entire memobject
     * was fetched. We only know how big it is
     */
    fetch->recv.bytes = mem->size();
    fetch->sent.msg = fetch->recv.msg = 1;
    fetch->expires = fetch->entry->expires;
    fetch->resp_time = squid_curtime - fetch->start_time;

    debugs(72, 3, "peerDigestFetchFinish: recv " << fetch->recv.bytes <<
           " bytes in " << (int) fetch->resp_time << " secs");

    debugs(72, 3, "peerDigestFetchFinish: expires: " <<
           (long int) fetch->expires << " (" << std::showpos <<
           (int) (fetch->expires - squid_curtime) << "), lmt: " <<
           std::noshowpos << (long int) fetch->entry->lastmod << " (" <<
           std::showpos << (int) (fetch->entry->lastmod - squid_curtime) <<
           ")");

}

static int
peerDigestSetCBlock(PeerDigest * pd, const char *buf)
{
    StoreDigestCBlock cblock;
    int freed_size = 0;
    const char *host = pd->host.termedBuf();

    memcpy(&cblock, buf, sizeof(cblock));
    /* network -> host conversions */
    cblock.ver.current = ntohs(cblock.ver.current);
    cblock.ver.required = ntohs(cblock.ver.required);
    cblock.capacity = ntohl(cblock.capacity);
    cblock.count = ntohl(cblock.count);
    cblock.del_count = ntohl(cblock.del_count);
    cblock.mask_size = ntohl(cblock.mask_size);
    debugs(72, 2, "got digest cblock from " << host << "; ver: " <<
           (int) cblock.ver.current << " (req: " << (int) cblock.ver.required <<
           ")");

    debugs(72, 2, "\t size: " <<
           cblock.mask_size << " bytes, e-cnt: " <<
           cblock.count << ", e-util: " <<
           xpercentInt(cblock.count, cblock.capacity) << "%" );
    /* check version requirements (both ways) */

    if (cblock.ver.required > CacheDigestVer.current) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest requires version " <<
               cblock.ver.required << "; have: " << CacheDigestVer.current);

        return 0;
    }

    if (cblock.ver.current < CacheDigestVer.required) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest is version " <<
               cblock.ver.current << "; we require: " <<
               CacheDigestVer.required);

        return 0;
    }

    /* check consistency */
    if (cblock.ver.required > cblock.ver.current ||
            cblock.mask_size <= 0 || cblock.capacity <= 0 ||
            cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) {
        debugs(72, DBG_CRITICAL, "" << host << " digest cblock is corrupted.");
        return 0;
    }

    /* check consistency further */
    if ((size_t)cblock.mask_size != cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
        debugs(72, DBG_CRITICAL, host << " digest cblock is corrupted " <<
               "(mask size mismatch: " << cblock.mask_size << " ? " <<
               cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)
               << ").");
        return 0;
    }

    /* there are some things we cannot do yet */
    if (cblock.hash_func_count != CacheDigestHashFuncCount) {
        debugs(72, DBG_CRITICAL, "" << host << " digest: unsupported #hash functions: " <<
               cblock.hash_func_count << " ? " << CacheDigestHashFuncCount << ".");
        return 0;
    }

    /*
     * no cblock bugs below this point
     */
    /* check size changes */
    if (pd->cd && cblock.mask_size != (ssize_t)pd->cd->mask_size) {
        debugs(72, 2, host << " digest changed size: " << cblock.mask_size <<
               " -> " << pd->cd->mask_size);
        freed_size = pd->cd->mask_size;
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    if (!pd->cd) {
        debugs(72, 2, "creating " << host << " digest; size: " << cblock.mask_size << " (" <<
               std::showpos << (int) (cblock.mask_size - freed_size) << ") bytes");
        pd->cd = cacheDigestCreate(cblock.capacity, cblock.bits_per_entry);

        if (cblock.mask_size >= freed_size)
            kb_incr(&statCounter.cd.memory, cblock.mask_size - freed_size);
    }

    assert(pd->cd);
    /* these assignments leave us in an inconsistent state until we finish reading the digest */
    pd->cd->count = cblock.count;
    pd->cd->del_count = cblock.del_count;
    return 1;
}
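/*
 * For reference, the control block parsed above arrives in network byte
 * order at the very start of the digest body. A rough sketch of the
 * layout implied by the conversions above (see the StoreDigestCBlock
 * definition for the authoritative version, including reserved fields):
 *
 *     ver.current, ver.required   16-bit digest format versions
 *     capacity                    32-bit number of entry slots
 *     count, del_count            32-bit entries added / deleted
 *     mask_size                   32-bit bit mask size, in bytes
 *     bits_per_entry              8-bit
 *     hash_func_count             8-bit
 */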

static int
peerDigestUseful(const PeerDigest * pd)
{
    /* TODO: we should calculate the prob of a false hit instead of bit util */
    const int bit_util = cacheDigestBitUtil(pd->cd);

    if (bit_util > 65) {
        debugs(72, DBG_CRITICAL, "Warning: " << pd->host <<
1084 " peer digest has too many bits on (" << bit_util << "%%).");

        return 0;
    }

    return 1;
}
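/*
 * Rough justification for the 65% cut-off (illustrative; assuming the
 * usual 4 hash functions): the false-hit probability of a Bloom-style
 * digest is roughly bit_util ^ hash_func_count, so 0.65^4 is already
 * about 18%, i.e. nearly one in five sibling hits would be wrong.
 */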

static int
saneDiff(time_t diff)
{
    return abs((int) diff) > squid_curtime / 2 ? 0 : diff;
}

void
peerDigestStatsReport(const PeerDigest * pd, StoreEntry * e)
{
#define f2s(flag) (pd->flags.flag ? "yes" : "no")
#define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
    ""#tm, (long int)pd->times.tm, \
    saneDiff(pd->times.tm - squid_curtime), \
    saneDiff(pd->times.tm - pd->times.initialized))

    assert(pd);

    const char *host = pd->host.termedBuf();
    storeAppendPrintf(e, "\npeer digest from %s\n", host);

    cacheDigestGuessStatsReport(&pd->stats.guess, e, host);

    storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n");
    appendTime(initialized);
    appendTime(needed);
    appendTime(requested);
    appendTime(received);
    appendTime(next_check);

    storeAppendPrintf(e, "peer digest state:\n");
    storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n",
                      f2s(needed), f2s(usable), f2s(requested));
    storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n",
                      (int) pd->times.retry_delay);
    storeAppendPrintf(e, "\tlast request response time: %d secs\n",
                      (int) pd->times.req_delay);
    storeAppendPrintf(e, "\tlast request result: %s\n",
                      pd->req_result ? pd->req_result : "(none)");

    storeAppendPrintf(e, "\npeer digest traffic:\n");
    storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n",
                      pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb);
    storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n",
                      pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb);

    storeAppendPrintf(e, "\npeer digest structure:\n");

    if (pd->cd)
        cacheDigestReport(pd->cd, host, e);
    else
        storeAppendPrintf(e, "\tno in-memory copy\n");
}

#endif