
/*
 * DEBUG: section 72 Peer Digest Routines
 * AUTHOR: Alex Rousskov
 *
 * SQUID Web Proxy Cache http://www.squid-cache.org/
 * ----------------------------------------------------------
 *
 * Squid is the result of efforts by numerous individuals from
 * the Internet community; see the CONTRIBUTORS file for full
 * details. Many organizations have provided support for Squid's
 * development; see the SPONSORS file for full details. Squid is
 * Copyrighted (C) 2001 by the Regents of the University of
 * California; see the COPYRIGHT file for full details. Squid
 * incorporates software developed and/or copyrighted by other
 * sources; see the CREDITS file for full details.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA.
 *
 */

#include "squid.h"
#if USE_CACHE_DIGESTS
#include "CacheDigest.h"
#include "CachePeer.h"
#include "event.h"
#include "FwdState.h"
#include "globals.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "internal.h"
#include "MemObject.h"
#include "mime_header.h"
#include "neighbors.h"
#include "PeerDigest.h"
#include "SquidTime.h"
#include "Store.h"
#include "store_key_md5.h"
#include "StoreClient.h"
#include "tools.h"

/* local types */

/* local prototypes */
static time_t peerDigestIncDelay(const PeerDigest * pd);
static time_t peerDigestNewDelay(const StoreEntry * e);
static void peerDigestSetCheck(PeerDigest * pd, time_t delay);
static void peerDigestClean(PeerDigest *);
static EVH peerDigestCheck;
static void peerDigestRequest(PeerDigest * pd);
static STCB peerDigestHandleReply;
static int peerDigestFetchReply(void *, char *, ssize_t);
int peerDigestSwapInHeaders(void *, char *, ssize_t);
int peerDigestSwapInCBlock(void *, char *, ssize_t);
int peerDigestSwapInMask(void *, char *, ssize_t);
static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name);
static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason);
static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err);
static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err);
static void peerDigestFetchFinish(DigestFetchState * fetch, int err);
static void peerDigestFetchSetStats(DigestFetchState * fetch);
static int peerDigestSetCBlock(PeerDigest * pd, const char *buf);
static int peerDigestUseful(const PeerDigest * pd);

/* local constants */
Version const CacheDigestVer = { 5, 3 };

#define StoreDigestCBlockSize sizeof(StoreDigestCBlock)

/* min interval for requesting digests from a given peer */
static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */
/* min interval for requesting digests (cumulative request stream) */
static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */

/* local vars */

static time_t pd_last_req_time = 0; /* last call to Check */

/* initialize peer digest */
static void
peerDigestInit(PeerDigest * pd, CachePeer * p)
{
    assert(pd && p);

    memset(pd, 0, sizeof(*pd));
    /*
     * DPW 2007-04-12
     * Lock on to the peer here. The corresponding cbdataReferenceDone()
     * is in peerDigestDestroy().
     */
    pd->peer = cbdataReference(p);
    /* if the peer disappears, we will still know its name */
    pd->host = p->host;

    pd->times.initialized = squid_curtime;
}

static void
peerDigestClean(PeerDigest * pd)
{
    assert(pd);

    if (pd->cd)
        cacheDigestDestroy(pd->cd);

    pd->host.clean();
}

CBDATA_CLASS_INIT(PeerDigest);

/* allocate new peer digest, call Init, and lock everything */
PeerDigest *
peerDigestCreate(CachePeer * p)
{
    PeerDigest *pd;
    assert(p);

    pd = new PeerDigest;
    peerDigestInit(pd, p);

    /* XXX This does not look right, and the same thing again in the caller */
    return cbdataReference(pd);
}

/* call Clean and free/unlock everything */
static void
peerDigestDestroy(PeerDigest * pd)
{
    void *p;
    assert(pd);
    void * peerTmp = pd->peer;

    /*
     * DPW 2007-04-12
     * We locked the peer in peerDigestInit(), this is
     * where we unlock it. If the peer is still valid,
     * tell it that the digest is gone.
     */
    if (cbdataReferenceValidDone(peerTmp, &p))
        peerNoteDigestGone((CachePeer *)p);

    peerDigestClean(pd);

    delete pd;
}

/* called by peer to indicate that somebody actually needs this digest */
void
peerDigestNeeded(PeerDigest * pd)
{
    assert(pd);
    assert(!pd->flags.needed);
    assert(!pd->cd);

    pd->flags.needed = true;
    pd->times.needed = squid_curtime;
    peerDigestSetCheck(pd, 0); /* check asap */
}

/* currently we do not have a reason to disable without destroying */
#if FUTURE_CODE
/* disables peer for good */
static void
peerDigestDisable(PeerDigest * pd)
{
    debugs(72, 2, "peerDigestDisable: peer " << pd->host.buf() << " disabled for good");
    pd->times.disabled = squid_curtime;
    pd->times.next_check = -1; /* never */
    pd->flags.usable = 0;

    if (pd->cd) {
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    /* we do not destroy the pd itself to preserve its "history" and stats */
}

#endif

/* increment retry delay [after an unsuccessful attempt] */
static time_t
peerDigestIncDelay(const PeerDigest * pd)
{
    assert(pd);
    return pd->times.retry_delay > 0 ?
           2 * pd->times.retry_delay : /* exponential backoff */
           PeerDigestReqMinGap; /* minimal delay */
}

/* artificially increase the Expires: value to avoid race conditions;
 * returns the delay until that [increased] expiration time */
static time_t
peerDigestNewDelay(const StoreEntry * e)
{
    assert(e);

    if (e->expires > 0)
        return e->expires + PeerDigestReqMinGap - squid_curtime;

    return PeerDigestReqMinGap;
}

/* registers next digest verification */
static void
peerDigestSetCheck(PeerDigest * pd, time_t delay)
{
    eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
    pd->times.next_check = squid_curtime + delay;
    debugs(72, 3, "peerDigestSetCheck: will check peer " << pd->host << " in " << delay << " secs");
}

/*
 * called when the peer is about to disappear or has already disappeared
 */
void
peerDigestNotePeerGone(PeerDigest * pd)
{
    if (pd->flags.requested) {
        debugs(72, 2, "peerDigest: peer " << pd->host << " gone, will destroy after fetch.");
        /* do nothing now, the fetching chain will notice and take action */
    } else {
        debugs(72, 2, "peerDigest: peer " << pd->host << " is gone, destroying now.");
        peerDigestDestroy(pd);
    }
}

/* callback for eventAdd() (with peer digest locked)
 * request new digest if our copy is too old or if we lack one;
 * schedule next check otherwise */
static void
peerDigestCheck(void *data)
{
    PeerDigest *pd = (PeerDigest *)data;
    time_t req_time;

    assert(!pd->flags.requested);

    pd->times.next_check = 0; /* unknown */

    if (!cbdataReferenceValid(pd->peer)) {
        peerDigestNotePeerGone(pd);
        return;
    }

    debugs(72, 3, "peerDigestCheck: peer " << pd->peer->host << ":" << pd->peer->http_port);
    debugs(72, 3, "peerDigestCheck: time: " << squid_curtime <<
           ", last received: " << (long int) pd->times.received << " (" <<
           std::showpos << (int) (squid_curtime - pd->times.received) << ")");

    /* decide when we should send the request:
     * request now unless too close to other requests */
    req_time = squid_curtime;

    /* per-peer limit */

    if (req_time - pd->times.received < PeerDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close peer requests (" <<
               (int) (req_time - pd->times.received) << " < " <<
               (int) PeerDigestReqMinGap << " secs).");

        req_time = pd->times.received + PeerDigestReqMinGap;
    }

    /* global limit */
    if (req_time - pd_last_req_time < GlobDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close requests (" <<
               (int) (req_time - pd_last_req_time) << " < " <<
               (int) GlobDigestReqMinGap << " secs).");

        req_time = pd_last_req_time + GlobDigestReqMinGap;
    }

    if (req_time <= squid_curtime)
        peerDigestRequest(pd); /* will set pd->flags.requested */
    else
        peerDigestSetCheck(pd, req_time - squid_curtime);
}

CBDATA_TYPE(DigestFetchState);

/* ask store for a digest */
static void
peerDigestRequest(PeerDigest * pd)
{
    CachePeer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url = NULL;
    const cache_key *key;
    HttpRequest *req;
    DigestFetchState *fetch = NULL;
    StoreIOBuffer tempBuffer;

    pd->req_result = NULL;
    pd->flags.requested = true;

    /* compute future request components */

    if (p->digest_url)
        url = xstrdup(p->digest_url);
    else
        url = xstrdup(internalRemoteUri(p->host, p->http_port, "/squid-internal-periodic/", StoreDigestFileName));

    req = HttpRequest::CreateFromUrl(url);

    assert(req);

    key = storeKeyPublicByRequest(req);

    debugs(72, 2, "peerDigestRequest: " << url << " key: " << storeKeyText(key));

    /* add custom headers */
    assert(!req->header.len);

    req->header.putStr(HDR_ACCEPT, StoreDigestMimeStr);

    req->header.putStr(HDR_ACCEPT, "text/html");

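    /* attach the configured peer login, skipping the special keywords
     * ('*...', PASS, PASSTHRU, NEGOTIATE, PROXYPASS) that are not literal
     * credentials for this request */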
    if (p->login &&
            p->login[0] != '*' &&
            strcmp(p->login, "PASS") != 0 &&
            strcmp(p->login, "PASSTHRU") != 0 &&
            strcmp(p->login, "NEGOTIATE") != 0 &&
            strcmp(p->login, "PROXYPASS") != 0) {
        xstrncpy(req->login, p->login, MAX_LOGIN_SZ);
    }
    /* create fetch state structure */
    CBDATA_INIT_TYPE(DigestFetchState);

    fetch = cbdataAlloc(DigestFetchState);

    fetch->request = req;
    HTTPMSGLOCK(fetch->request);

    fetch->pd = cbdataReference(pd);

    fetch->offset = 0;

    fetch->state = DIGEST_READ_REPLY;

    /* update timestamps */
    fetch->start_time = squid_curtime;

    pd->times.requested = squid_curtime;

    pd_last_req_time = squid_curtime;

    req->flags.cachable = true;

    /* the rest is based on clientProcessExpired() */
    req->flags.refresh = true;

    old_e = fetch->old_entry = Store::Root().get(key);

    if (old_e) {
        debugs(72, 5, "peerDigestRequest: found old entry");

        old_e->lock("peerDigestRequest");
        old_e->createMemObject(url, url, req->method);

        fetch->old_sc = storeClientListAdd(old_e, fetch);
    }

    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    fetch->sc = storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */

    if (old_e)
        e->lastmod = old_e->lastmod;

    /* push towards peer cache */
    debugs(72, 3, "peerDigestRequest: forwarding to fwdStart...");

    FwdState::fwdStart(Comm::ConnectionPointer(), e, req);

    tempBuffer.offset = 0;

    tempBuffer.length = SM_PAGE_SIZE;

    tempBuffer.data = fetch->buf;

    storeClientCopy(fetch->sc, e, tempBuffer,
                    peerDigestHandleReply, fetch);

    safe_free(url);
}

/* Handle the data copying .. */

/*
 * This routine handles the copied data and then dispatches it to one of
 * several subfunctions, depending on the current parse state.
 * It also tracks the buffer offset and the amount of data seen so far,
 * so that the per-state parsers do not have to be rewritten.
 */
static void
peerDigestHandleReply(void *data, StoreIOBuffer receivedData)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    int retsize = -1;
    digest_read_state_t prevstate;
    int newsize;

    assert(fetch->pd && receivedData.data);
    /* The existing code assumes that the received pointer is
     * where we asked the data to be put
     */
    assert(fetch->buf + fetch->bufofs == receivedData.data);

    /* Update the buffer size */
    fetch->bufofs += receivedData.length;

    assert(fetch->bufofs <= SM_PAGE_SIZE);

    /* If we've fetched enough, return */

    if (peerDigestFetchedEnough(fetch, fetch->buf, fetch->bufofs, "peerDigestHandleReply"))
        return;

    /* Call the right function based on the state */
    /* (Those functions will update the state if needed) */

    /* Give us a temporary reference. Some of the calls we make may
     * try to destroy the fetch structure, and we would like to know
     * if they do.
     */
    fetch = cbdataReference(fetch);

    /* Repeat this loop until we're out of data OR the state changes */
    /* (So keep going if the state has changed and we still have data) */
    do {
        prevstate = fetch->state;

        switch (fetch->state) {

        case DIGEST_READ_REPLY:
            retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_HEADERS:
            retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_CBLOCK:
            retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_MASK:
            retsize = peerDigestSwapInMask(fetch, fetch->buf, fetch->bufofs);
            break;

        case DIGEST_READ_NONE:
            break;

        case DIGEST_READ_DONE:
            goto finish;
            break;

        default:
            fatal("Bad digest transfer mode!\n");
        }

        if (retsize < 0)
            goto finish;

        /*
         * The returned size indicates how much of the buffer was read -
         * so move the remainder of the buffer to the beginning
         * and update the bufofs / bufsize
         */
        newsize = fetch->bufofs - retsize;

        memmove(fetch->buf, fetch->buf + retsize, newsize);

        fetch->bufofs = newsize;

    } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0);

    /* Update the copy offset */
    fetch->offset += receivedData.length;

    /* Schedule another copy */
    if (cbdataReferenceValid(fetch)) {
        StoreIOBuffer tempBuffer;
        tempBuffer.offset = fetch->offset;
        tempBuffer.length = SM_PAGE_SIZE - fetch->bufofs;
        tempBuffer.data = fetch->buf + fetch->bufofs;
        storeClientCopy(fetch->sc, fetch->entry, tempBuffer,
                        peerDigestHandleReply, fetch);
    }

finish:
    /* Get rid of our reference, we've finished with it for now */
    cbdataReferenceDone(fetch);
}

/* wait for the full HTTP headers to be received, then parse them */
/*
 * This routine parses the reply line.
 * If the reply line indicates an OK, the same data is passed on
 * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
 * we simply stop parsing.
 */
static int
peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd = fetch->pd;
    size_t hdr_size;
    assert(pd && buf);
    assert(!fetch->offset);

    assert(fetch->state == DIGEST_READ_REPLY);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
        return -1;

    if ((hdr_size = headersEnd(buf, size))) {
        HttpReply const *reply = fetch->entry->getReply();
        assert(reply);
        assert(reply->sline.status() != Http::scNone);
        const Http::StatusCode status = reply->sline.status();
        debugs(72, 3, "peerDigestFetchReply: " << pd->host << " status: " << status <<
               ", expires: " << (long int) reply->expires << " (" << std::showpos <<
               (int) (reply->expires - squid_curtime) << ")");

        /* this "if" is based on clientHandleIMSReply() */

        if (status == Http::scNotModified) {
            /* our old entry is fine */
            assert(fetch->old_entry);

            if (!fetch->old_entry->mem_obj->request) {
                fetch->old_entry->mem_obj->request = fetch->entry->mem_obj->request;
                HTTPMSGLOCK(fetch->old_entry->mem_obj->request);
            }

            assert(fetch->old_entry->mem_obj->request);

            HttpReply *old_rep = (HttpReply *) fetch->old_entry->getReply();

            old_rep->updateOnNotModified(reply);

            fetch->old_entry->timestampsSet();

            /* get rid of 304 reply */
            storeUnregister(fetch->sc, fetch->entry, fetch);

            fetch->entry->unlock("peerDigestFetchReply 304");

            fetch->entry = fetch->old_entry;

            fetch->old_entry = NULL;

            /* preserve request -- we need its size to update counters */
            /* requestUnlink(r); */
            /* fetch->entry->mem_obj->request = NULL; */
        } else if (status == Http::scOkay) {
            /* get rid of old entry if any */

            if (fetch->old_entry) {
                debugs(72, 3, "peerDigestFetchReply: got new digest, releasing old one");
                storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
                fetch->old_entry->releaseRequest();
                fetch->old_entry->unlock("peerDigestFetchReply 200");
                fetch->old_entry = NULL;
            }
        } else {
            /* some kind of a bug */
            peerDigestFetchAbort(fetch, buf, reply->sline.reason());
            return -1; /* XXX -1 will abort stuff in ReadReply! */
        }

        /* must have a ready-to-use store entry if we got here */
        /* can we stay with the old in-memory digest? */
        if (status == Http::scNotModified && fetch->pd->cd) {
            peerDigestFetchStop(fetch, buf, "Not modified");
            fetch->state = DIGEST_READ_DONE;
        } else {
            fetch->state = DIGEST_READ_HEADERS;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "reply header too big");
    }

    /* We don't want to actually ack that we've handled anything,
     * otherwise SwapInHeaders() won't get the reply line .. */
    return 0;
}

/* fetch headers from disk, pass on to SwapInCBlock */
int
peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    size_t hdr_size;

    assert(fetch->state == DIGEST_READ_HEADERS);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
        return -1;

    assert(!fetch->offset);

    if ((hdr_size = headersEnd(buf, size))) {
        assert(fetch->entry->getReply());
        assert(fetch->entry->getReply()->sline.status() != Http::scNone);

        if (fetch->entry->getReply()->sline.status() != Http::scOkay) {
            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
                   " status " << fetch->entry->getReply()->sline.status() <<
                   " got cached!");

            peerDigestFetchAbort(fetch, buf, "internal status error");
            return -1;
        }

        fetch->state = DIGEST_READ_CBLOCK;
        return hdr_size; /* Say how much data we read */
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE) {
            peerDigestFetchAbort(fetch, buf, "stored header too big");
            return -1;
        } else {
            return 0; /* We need to read more to parse .. */
        }
    }

    fatal("peerDigestSwapInHeaders() - shouldn't get here!\n");
    return 0; /* keep gcc happy */
}

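/* parse the fixed-size digest control block (cblock); on success, switch
 * to reading the digest bit mask */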
int
peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;

    assert(fetch->state == DIGEST_READ_CBLOCK);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock"))
        return -1;

    if (size >= (ssize_t)StoreDigestCBlockSize) {
        PeerDigest *pd = fetch->pd;

        assert(pd && fetch->entry->getReply());

        if (peerDigestSetCBlock(pd, buf)) {
            /* XXX: soon we will have variable header size */
            /* switch to CD buffer and fetch digest guts */
            buf = NULL;
            assert(pd->cd->mask);
            fetch->state = DIGEST_READ_MASK;
            return StoreDigestCBlockSize;
        } else {
            peerDigestFetchAbort(fetch, buf, "invalid digest cblock");
            return -1;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE) {
            peerDigestFetchAbort(fetch, buf, "digest cblock too big");
            return -1;
        } else {
            return 0; /* We need more data */
        }
    }

    fatal("peerDigestSwapInCBlock(): shouldn't get here!\n");
    return 0; /* keep gcc happy */
}

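/* copy a chunk of the digest bit mask from the copy buffer into pd->cd->mask;
 * the fetch is finished once the entire mask has been received */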
int
peerDigestSwapInMask(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd;

    pd = fetch->pd;
    assert(pd->cd && pd->cd->mask);

    /*
     * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore!
     * we need to do the copy ourselves!
     */
    memcpy(pd->cd->mask + fetch->mask_offset, buf, size);

    /* NOTE! buf points to the middle of pd->cd->mask! */

    if (peerDigestFetchedEnough(fetch, NULL, size, "peerDigestSwapInMask"))
        return -1;

    fetch->mask_offset += size;

    if (fetch->mask_offset >= pd->cd->mask_size) {
        debugs(72, 2, "peerDigestSwapInMask: Done! Got " <<
               fetch->mask_offset << ", expected " << pd->cd->mask_size);
        assert(fetch->mask_offset == pd->cd->mask_size);
        assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask"));
        return -1; /* XXX! */
    } else {
        /* We always consume everything we were given, so report the full size */
        return size;
    }

    fatal("peerDigestSwapInMask(): shouldn't get here!\n");
    return 0; /* keep gcc happy */
}

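/* check the common exit conditions shared by all fetch steps; if any is met,
 * finish the request and return non-zero so the caller stops parsing */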
static int
peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name)
{
    PeerDigest *pd = NULL;
    const char *host = "<unknown>"; /* peer host */
    const char *reason = NULL; /* reason for completion */
    const char *no_bug = NULL; /* successful completion if set */
    const int pdcb_valid = cbdataReferenceValid(fetch->pd);
    const int pcb_valid = cbdataReferenceValid(fetch->pd->peer);

    /* test possible exit conditions (the same for most steps!)
     * cases marked with '?!' should not happen */

    if (!reason) {
        if (!(pd = fetch->pd))
            reason = "peer digest disappeared?!";

#if DONT /* WHY NOT? /HNO */

        else if (!cbdataReferenceValid(pd))
            reason = "invalidated peer digest?!";

#endif

        else
            host = pd->host.termedBuf();
    }

    debugs(72, 6, step_name << ": peer " << host << ", offset: " <<
           fetch->offset << " size: " << size << ".");

    /* continue checking (with pd and host known and valid) */

    if (!reason) {
        if (!cbdataReferenceValid(pd->peer))
            reason = "peer disappeared";
        else if (size < 0)
            reason = "swap failure";
        else if (!fetch->entry)
            reason = "swap aborted?!";
        else if (EBIT_TEST(fetch->entry->flags, ENTRY_ABORTED))
            reason = "swap aborted";
    }

    /* continue checking (maybe-successful eof case) */
    if (!reason && !size) {
        if (!pd->cd)
            reason = "null digest?!";
        else if (fetch->mask_offset != (int)pd->cd->mask_size)
            reason = "premature end of digest?!";
        else if (!peerDigestUseful(pd))
            reason = "useless digest";
        else
            reason = no_bug = "success";
    }

    /* finish if we have a reason */
    if (reason) {
        const int level = strstr(reason, "?!") ? 1 : 3;
        debugs(72, level, "" << step_name << ": peer " << host << ", exiting after '" << reason << "'");
        peerDigestReqFinish(fetch, buf,
                            1, pdcb_valid, pcb_valid, reason, !no_bug);
    } else {
        /* paranoid check */
        assert(pdcb_valid && pcb_valid);
    }

    return reason != NULL;
}

/* call this when all callback data is valid and the fetch must be stopped but
 * no error has occurred (e.g. we received a 304 reply and reuse the old digest) */
static void
peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchStop: peer " << fetch->pd->host << ", reason: " << reason);
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 0);
}

/* call this when all callback data is valid but something bad happened */
static void
peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debugs(72, 2, "peerDigestFetchAbort: peer " << fetch->pd->host << ", reason: " << reason);
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1);
}

/* complete the digest transfer, update stats, unlock/release everything */
static void
peerDigestReqFinish(DigestFetchState * fetch, char *buf,
                    int fcb_valid, int pdcb_valid, int pcb_valid,
                    const char *reason, int err)
{
    assert(reason);

    /* must go before peerDigestPDFinish */

    if (pdcb_valid) {
        fetch->pd->flags.requested = false;
        fetch->pd->req_result = reason;
    }

    /* schedule next check if peer is still out there */
    if (pcb_valid) {
        PeerDigest *pd = fetch->pd;

        if (err) {
            pd->times.retry_delay = peerDigestIncDelay(pd);
            peerDigestSetCheck(pd, pd->times.retry_delay);
        } else {
            pd->times.retry_delay = 0;
            peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry));
        }
    }

    /* note: order is significant */
    if (fcb_valid)
        peerDigestFetchSetStats(fetch);

    if (pdcb_valid)
        peerDigestPDFinish(fetch, pcb_valid, err);

    if (fcb_valid)
        peerDigestFetchFinish(fetch, err);
}

/* destroys digest if peer disappeared
 * must be called only when fetch and pd cbdata are valid */
static void
peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err)
{
    PeerDigest *pd = fetch->pd;
    const char *host = pd->host.termedBuf();

    pd->times.received = squid_curtime;
    pd->times.req_delay = fetch->resp_time;
    kb_incr(&pd->stats.sent.kbytes, (size_t) fetch->sent.bytes);
    kb_incr(&pd->stats.recv.kbytes, (size_t) fetch->recv.bytes);
    pd->stats.sent.msgs += fetch->sent.msg;
    pd->stats.recv.msgs += fetch->recv.msg;

    if (err) {
        debugs(72, DBG_IMPORTANT, "" << (pcb_valid ? "temporary " : "" ) << "disabling (" << pd->req_result << ") digest from " << host);

        if (pd->cd) {
            cacheDigestDestroy(pd->cd);
            pd->cd = NULL;
        }

        pd->flags.usable = false;

        if (!pcb_valid)
            peerDigestNotePeerGone(pd);
    } else {
        assert(pcb_valid);

        pd->flags.usable = true;

        /* XXX: ugly condition, but how? */

        if (fetch->entry->store_status == STORE_OK)
            debugs(72, 2, "re-used old digest from " << host);
        else
            debugs(72, 2, "received valid digest from " << host);
    }

    cbdataReferenceDone(fetch->pd);
}

/* free fetch state structures
 * must be called only when fetch cbdata is valid */
static void
peerDigestFetchFinish(DigestFetchState * fetch, int err)
{
    assert(fetch->entry && fetch->request);

    if (fetch->old_entry) {
        debugs(72, 3, "peerDigestFetchFinish: deleting old entry");
        storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
        fetch->old_entry->releaseRequest();
        fetch->old_entry->unlock("peerDigestFetchFinish old");
        fetch->old_entry = NULL;
    }

    /* update global stats */
    kb_incr(&statCounter.cd.kbytes_sent, (size_t) fetch->sent.bytes);

    kb_incr(&statCounter.cd.kbytes_recv, (size_t) fetch->recv.bytes);

    statCounter.cd.msgs_sent += fetch->sent.msg;

    statCounter.cd.msgs_recv += fetch->recv.msg;

    /* unlock everything */
    storeUnregister(fetch->sc, fetch->entry, fetch);

    fetch->entry->unlock("peerDigestFetchFinish new");

    HTTPMSGUNLOCK(fetch->request);

    fetch->entry = NULL;

    assert(fetch->pd == NULL);

    cbdataFree(fetch);
}

/* calculate fetch stats after completion */
static void
peerDigestFetchSetStats(DigestFetchState * fetch)
{
    MemObject *mem;
    assert(fetch->entry && fetch->request);

    mem = fetch->entry->mem_obj;
    assert(mem);

    /* XXX: outgoing numbers are not precise */
    /* XXX: we must distinguish between 304 hits and misses here */
    fetch->sent.bytes = fetch->request->prefixLen();
    /* XXX: this is slightly wrong: we don't KNOW that the entire memobject
     * was fetched. We only know how big it is
     */
    fetch->recv.bytes = mem->size();
    fetch->sent.msg = fetch->recv.msg = 1;
    fetch->expires = fetch->entry->expires;
    fetch->resp_time = squid_curtime - fetch->start_time;

    debugs(72, 3, "peerDigestFetchFinish: recv " << fetch->recv.bytes <<
           " bytes in " << (int) fetch->resp_time << " secs");

    debugs(72, 3, "peerDigestFetchFinish: expires: " <<
           (long int) fetch->expires << " (" << std::showpos <<
           (int) (fetch->expires - squid_curtime) << "), lmt: " <<
           std::noshowpos << (long int) fetch->entry->lastmod << " (" <<
           std::showpos << (int) (fetch->entry->lastmod - squid_curtime) <<
           ")");

}

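/* parse and validate a digest control block received from the peer;
 * (re)creates pd->cd and returns non-zero on success */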
static int
peerDigestSetCBlock(PeerDigest * pd, const char *buf)
{
    StoreDigestCBlock cblock;
    int freed_size = 0;
    const char *host = pd->host.termedBuf();

    memcpy(&cblock, buf, sizeof(cblock));
    /* network -> host conversions */
    cblock.ver.current = ntohs(cblock.ver.current);
    cblock.ver.required = ntohs(cblock.ver.required);
    cblock.capacity = ntohl(cblock.capacity);
    cblock.count = ntohl(cblock.count);
    cblock.del_count = ntohl(cblock.del_count);
    cblock.mask_size = ntohl(cblock.mask_size);
    debugs(72, 2, "got digest cblock from " << host << "; ver: " <<
           (int) cblock.ver.current << " (req: " << (int) cblock.ver.required <<
           ")");

    debugs(72, 2, "\t size: " <<
           cblock.mask_size << " bytes, e-cnt: " <<
           cblock.count << ", e-util: " <<
           xpercentInt(cblock.count, cblock.capacity) << "%" );
    /* check version requirements (both ways) */

    if (cblock.ver.required > CacheDigestVer.current) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest requires version " <<
               cblock.ver.required << "; have: " << CacheDigestVer.current);

        return 0;
    }

    if (cblock.ver.current < CacheDigestVer.required) {
        debugs(72, DBG_IMPORTANT, "" << host << " digest is version " <<
               cblock.ver.current << "; we require: " <<
               CacheDigestVer.required);

        return 0;
    }

    /* check consistency */
    if (cblock.ver.required > cblock.ver.current ||
            cblock.mask_size <= 0 || cblock.capacity <= 0 ||
            cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) {
        debugs(72, DBG_CRITICAL, "" << host << " digest cblock is corrupted.");
        return 0;
    }

    /* check consistency further */
    if ((size_t)cblock.mask_size != cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
        debugs(72, DBG_CRITICAL, host << " digest cblock is corrupted " <<
               "(mask size mismatch: " << cblock.mask_size << " ? " <<
               cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)
               << ").");
        return 0;
    }

    /* there are some things we cannot do yet */
    if (cblock.hash_func_count != CacheDigestHashFuncCount) {
        debugs(72, DBG_CRITICAL, "" << host << " digest: unsupported #hash functions: " <<
               cblock.hash_func_count << " ? " << CacheDigestHashFuncCount << ".");
        return 0;
    }

    /*
     * no cblock bugs below this point
     */
    /* check size changes */
    if (pd->cd && cblock.mask_size != (ssize_t)pd->cd->mask_size) {
        debugs(72, 2, host << " digest changed size: " << cblock.mask_size <<
               " -> " << pd->cd->mask_size);
        freed_size = pd->cd->mask_size;
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    if (!pd->cd) {
        debugs(72, 2, "creating " << host << " digest; size: " << cblock.mask_size << " (" <<
               std::showpos << (int) (cblock.mask_size - freed_size) << ") bytes");
        pd->cd = cacheDigestCreate(cblock.capacity, cblock.bits_per_entry);

        if (cblock.mask_size >= freed_size)
            kb_incr(&statCounter.cd.memory, cblock.mask_size - freed_size);
    }

    assert(pd->cd);
    /* these assignments leave us in an inconsistent state until we finish reading the digest */
    pd->cd->count = cblock.count;
    pd->cd->del_count = cblock.del_count;
    return 1;
}

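/* heuristic check: a digest with too many bits set would produce too many
 * false hits to be worth keeping */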
static int
peerDigestUseful(const PeerDigest * pd)
{
    /* TODO: we should calculate the prob of a false hit instead of bit util */
    const int bit_util = cacheDigestBitUtil(pd->cd);

    if (bit_util > 65) {
        debugs(72, DBG_CRITICAL, "Warning: " << pd->host <<
               " peer digest has too many bits on (" << bit_util << "%).");

        return 0;
    }

    return 1;
}

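/* suppress wildly out-of-range time differences (e.g. from unset timestamps)
 * so they do not clutter the report below */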
static int
saneDiff(time_t diff)
{
    return abs((int) diff) > squid_curtime / 2 ? 0 : diff;
}

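/* append this peer digest's timing, state and traffic statistics to a cache
 * manager report entry */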
void
peerDigestStatsReport(const PeerDigest * pd, StoreEntry * e)
{
#define f2s(flag) (pd->flags.flag ? "yes" : "no")
#define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
                                         ""#tm, (long int)pd->times.tm, \
                                         saneDiff(pd->times.tm - squid_curtime), \
                                         saneDiff(pd->times.tm - pd->times.initialized))

    assert(pd);

    const char *host = pd->host.termedBuf();
    storeAppendPrintf(e, "\npeer digest from %s\n", host);

    cacheDigestGuessStatsReport(&pd->stats.guess, e, host);

    storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n");
    appendTime(initialized);
    appendTime(needed);
    appendTime(requested);
    appendTime(received);
    appendTime(next_check);

    storeAppendPrintf(e, "peer digest state:\n");
    storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n",
                      f2s(needed), f2s(usable), f2s(requested));
    storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n",
                      (int) pd->times.retry_delay);
    storeAppendPrintf(e, "\tlast request response time: %d secs\n",
                      (int) pd->times.req_delay);
    storeAppendPrintf(e, "\tlast request result: %s\n",
                      pd->req_result ? pd->req_result : "(none)");

    storeAppendPrintf(e, "\npeer digest traffic:\n");
    storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n",
                      pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb);
    storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n",
                      pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb);

    storeAppendPrintf(e, "\npeer digest structure:\n");

    if (pd->cd)
        cacheDigestReport(pd->cd, host, e);
    else
        storeAppendPrintf(e, "\tno in-memory copy\n");
}

#endif