]>
Commit | Line | Data |
---|---|---|
9b7de833 | 1 | |
2 | /* | |
06a5ae20 | 3 | * $Id: peer_digest.cc,v 1.105 2005/11/05 00:08:32 wessels Exp $ |
9b7de833 | 4 | * |
5 | * DEBUG: section 72 Peer Digest Routines | |
6 | * AUTHOR: Alex Rousskov | |
7 | * | |
2b6662ba | 8 | * SQUID Web Proxy Cache http://www.squid-cache.org/ |
e25c139f | 9 | * ---------------------------------------------------------- |
9b7de833 | 10 | * |
2b6662ba | 11 | * Squid is the result of efforts by numerous individuals from |
12 | * the Internet community; see the CONTRIBUTORS file for full | |
13 | * details. Many organizations have provided support for Squid's | |
14 | * development; see the SPONSORS file for full details. Squid is | |
15 | * Copyrighted (C) 2001 by the Regents of the University of | |
16 | * California; see the COPYRIGHT file for full details. Squid | |
17 | * incorporates software developed and/or copyrighted by other | |
18 | * sources; see the CREDITS file for full details. | |
9b7de833 | 19 | * |
20 | * This program is free software; you can redistribute it and/or modify | |
21 | * it under the terms of the GNU General Public License as published by | |
22 | * the Free Software Foundation; either version 2 of the License, or | |
23 | * (at your option) any later version. | |
24 | * | |
25 | * This program is distributed in the hope that it will be useful, | |
26 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
27 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
28 | * GNU General Public License for more details. | |
29 | * | |
30 | * You should have received a copy of the GNU General Public License | |
31 | * along with this program; if not, write to the Free Software | |
cbdec147 | 32 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. |
e25c139f | 33 | * |
9b7de833 | 34 | */ |
35 | ||
36 | #include "squid.h" | |
6cfa8966 | 37 | #if USE_CACHE_DIGESTS |
7a2e5bb5 | 38 | |
528b2c61 | 39 | #include "Store.h" |
40 | #include "HttpRequest.h" | |
41 | #include "HttpReply.h" | |
42 | #include "MemObject.h" | |
598465f1 | 43 | #include "StoreClient.h" |
44 | ||
9b7de833 | 45 | /* local types */ |
46 | ||
47 | /* local prototypes */ | |
e13ee7ad | 48 | static time_t peerDigestIncDelay(const PeerDigest * pd); |
49 | static time_t peerDigestNewDelay(const StoreEntry * e); | |
50 | static void peerDigestSetCheck(PeerDigest * pd, time_t delay); | |
eb16313f | 51 | static void peerDigestClean(PeerDigest *); |
e13ee7ad | 52 | static EVH peerDigestCheck; |
53 | static void peerDigestRequest(PeerDigest * pd); | |
add2192d | 54 | static STCB peerDigestHandleReply; |
3dfaa3d2 | 55 | static int peerDigestFetchReply(void *, char *, ssize_t); |
1f140227 | 56 | int peerDigestSwapInHeaders(void *, char *, ssize_t); |
57 | int peerDigestSwapInCBlock(void *, char *, ssize_t); | |
58 | int peerDigestSwapInMask(void *, char *, ssize_t); | |
4b4cd312 | 59 | static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name); |
551e60a9 | 60 | static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason); |
e13ee7ad | 61 | static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason); |
62 | static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err); | |
63 | static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err); | |
64 | static void peerDigestFetchFinish(DigestFetchState * fetch, int err); | |
65 | static void peerDigestFetchSetStats(DigestFetchState * fetch); | |
66 | static int peerDigestSetCBlock(PeerDigest * pd, const char *buf); | |
67 | static int peerDigestUseful(const PeerDigest * pd); | |
395b813e | 68 | |
9b7de833 | 69 | |
70 | /* local constants */ | |
9d486b43 | 71 | |
9b7de833 | 72 | #define StoreDigestCBlockSize sizeof(StoreDigestCBlock) |
73 | ||
e13ee7ad | 74 | /* min interval for requesting digests from a given peer */ |
75 | static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */ | |
76 | /* min interval for requesting digests (cumulative request stream) */ | |
77 | static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */ | |
bd890734 | 78 | |
79 | /* local vars */ | |
9d486b43 | 80 | |
8a6218c6 | 81 | static time_t pd_last_req_time = 0; /* last call to Check */ |
9b7de833 | 82 | |
e13ee7ad | 83 | /* initialize peer digest */ |
84 | static void | |
8a6218c6 | 85 | peerDigestInit(PeerDigest * pd, peer * p) |
9b7de833 | 86 | { |
e13ee7ad | 87 | assert(pd && p); |
88 | ||
89 | memset(pd, 0, sizeof(*pd)); | |
90 | pd->peer = p; | |
91 | /* if peer disappears, we will know it's name */ | |
528b2c61 | 92 | pd->host = p->host; |
e13ee7ad | 93 | |
94 | pd->times.initialized = squid_curtime; | |
9b7de833 | 95 | } |
96 | ||
4b4cd312 | 97 | static void |
8a6218c6 | 98 | peerDigestClean(PeerDigest * pd) |
9b7de833 | 99 | { |
e13ee7ad | 100 | assert(pd); |
62e76326 | 101 | |
e13ee7ad | 102 | if (pd->cd) |
62e76326 | 103 | cacheDigestDestroy(pd->cd); |
104 | ||
528b2c61 | 105 | pd->host.clean(); |
9b7de833 | 106 | } |
107 | ||
0353e724 | 108 | CBDATA_CLASS_INIT(PeerDigest); |
109 | ||
110 | void * | |
111 | PeerDigest::operator new (size_t) | |
112 | { | |
113 | CBDATA_INIT_TYPE(PeerDigest); | |
114 | PeerDigest *result = cbdataAlloc(PeerDigest); | |
115 | return result; | |
116 | } | |
117 | ||
118 | void | |
119 | PeerDigest::operator delete (void *address) | |
120 | { | |
121 | PeerDigest *t = static_cast<PeerDigest *>(address); | |
122 | cbdataFree(t); | |
123 | } | |
124 | ||
e13ee7ad | 125 | /* allocate new peer digest, call Init, and lock everything */ |
126 | PeerDigest * | |
8a6218c6 | 127 | peerDigestCreate(peer * p) |
e13ee7ad | 128 | { |
129 | PeerDigest *pd; | |
8a6218c6 | 130 | assert(p); |
e13ee7ad | 131 | |
0353e724 | 132 | pd = new PeerDigest; |
e13ee7ad | 133 | peerDigestInit(pd, p); |
e13ee7ad | 134 | |
fa80a8ef | 135 | /* XXX This does not look right, and the same thing again in the caller */ |
136 | return cbdataReference(pd); | |
e13ee7ad | 137 | } |
138 | ||
139 | /* call Clean and free/unlock everything */ | |
2d72d4fd | 140 | static void |
8a6218c6 | 141 | peerDigestDestroy(PeerDigest * pd) |
e13ee7ad | 142 | { |
fcbbccfe | 143 | peer *p; |
e13ee7ad | 144 | assert(pd); |
e13ee7ad | 145 | |
fa80a8ef | 146 | /* inform peer (if any) that we are gone */ |
62e76326 | 147 | |
bac6d4bd | 148 | if (cbdataReferenceValidDone(pd->peer, (void **) &p)) |
62e76326 | 149 | peerNoteDigestGone(p); |
e13ee7ad | 150 | |
151 | peerDigestClean(pd); | |
62e76326 | 152 | |
00d77d6b | 153 | delete pd; |
e13ee7ad | 154 | } |
155 | ||
156 | /* called by peer to indicate that somebody actually needs this digest */ | |
157 | void | |
8a6218c6 | 158 | peerDigestNeeded(PeerDigest * pd) |
e13ee7ad | 159 | { |
160 | assert(pd); | |
161 | assert(!pd->flags.needed); | |
162 | assert(!pd->cd); | |
163 | ||
164 | pd->flags.needed = 1; | |
165 | pd->times.needed = squid_curtime; | |
8a6218c6 | 166 | peerDigestSetCheck(pd, 0); /* check asap */ |
e13ee7ad | 167 | } |
168 | ||
169 | /* currently we do not have a reason to disable without destroying */ | |
8a6218c6 | 170 | #if FUTURE_CODE |
395b813e | 171 | /* disables peer for good */ |
172 | static void | |
8a6218c6 | 173 | peerDigestDisable(PeerDigest * pd) |
395b813e | 174 | { |
8a6218c6 | 175 | debug(72, 2) ("peerDigestDisable: peer %s disabled for good\n", |
62e76326 | 176 | pd->host.buf()); |
e13ee7ad | 177 | pd->times.disabled = squid_curtime; |
8a6218c6 | 178 | pd->times.next_check = -1; /* never */ |
e13ee7ad | 179 | pd->flags.usable = 0; |
180 | ||
181 | if (pd->cd) { | |
62e76326 | 182 | cacheDigestDestroy(pd->cd); |
183 | pd->cd = NULL; | |
e13ee7ad | 184 | } |
62e76326 | 185 | |
e13ee7ad | 186 | /* we do not destroy the pd itself to preserve its "history" and stats */ |
395b813e | 187 | } |
62e76326 | 188 | |
e13ee7ad | 189 | #endif |
395b813e | 190 | |
e13ee7ad | 191 | /* increment retry delay [after an unsuccessful attempt] */ |
395b813e | 192 | static time_t |
8a6218c6 | 193 | peerDigestIncDelay(const PeerDigest * pd) |
395b813e | 194 | { |
e13ee7ad | 195 | assert(pd); |
196 | return pd->times.retry_delay > 0 ? | |
62e76326 | 197 | 2 * pd->times.retry_delay : /* exponential backoff */ |
198 | PeerDigestReqMinGap; /* minimal delay */ | |
395b813e | 199 | } |
200 | ||
62e76326 | 201 | /* artificially increases Expires: setting to avoid race conditions |
e13ee7ad | 202 | * returns the delay till that [increased] expiration time */ |
00485c29 | 203 | static time_t |
e13ee7ad | 204 | peerDigestNewDelay(const StoreEntry * e) |
00485c29 | 205 | { |
e13ee7ad | 206 | assert(e); |
62e76326 | 207 | |
00485c29 | 208 | if (e->expires > 0) |
62e76326 | 209 | return e->expires + PeerDigestReqMinGap - squid_curtime; |
210 | ||
e13ee7ad | 211 | return PeerDigestReqMinGap; |
00485c29 | 212 | } |
213 | ||
e13ee7ad | 214 | /* registers next digest verification */ |
395b813e | 215 | static void |
e13ee7ad | 216 | peerDigestSetCheck(PeerDigest * pd, time_t delay) |
395b813e | 217 | { |
e13ee7ad | 218 | eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1); |
219 | pd->times.next_check = squid_curtime + delay; | |
220 | debug(72, 3) ("peerDigestSetCheck: will check peer %s in %d secs\n", | |
62e76326 | 221 | pd->host.buf(), (int) delay); |
e13ee7ad | 222 | } |
223 | ||
5385c86a | 224 | /* |
225 | * called when peer is about to disappear or have already disappeared | |
226 | */ | |
e13ee7ad | 227 | void |
8a6218c6 | 228 | peerDigestNotePeerGone(PeerDigest * pd) |
229 | { | |
e13ee7ad | 230 | if (pd->flags.requested) { |
62e76326 | 231 | debug(72, 2) ("peerDigest: peer %s gone, will destroy after fetch.\n", |
232 | pd->host.buf()); | |
233 | /* do nothing now, the fetching chain will notice and take action */ | |
395b813e | 234 | } else { |
62e76326 | 235 | debug(72, 2) ("peerDigest: peer %s is gone, destroying now.\n", |
236 | pd->host.buf()); | |
237 | peerDigestDestroy(pd); | |
395b813e | 238 | } |
239 | } | |
240 | ||
e13ee7ad | 241 | /* callback for eventAdd() (with peer digest locked) |
242 | * request new digest if our copy is too old or if we lack one; | |
243 | * schedule next check otherwise */ | |
9b7de833 | 244 | static void |
e13ee7ad | 245 | peerDigestCheck(void *data) |
9b7de833 | 246 | { |
e6ccf245 | 247 | PeerDigest *pd = (PeerDigest *)data; |
e13ee7ad | 248 | time_t req_time; |
249 | ||
e13ee7ad | 250 | assert(!pd->flags.requested); |
5d9bb360 | 251 | |
8a6218c6 | 252 | pd->times.next_check = 0; /* unknown */ |
e13ee7ad | 253 | |
fa80a8ef | 254 | if (!cbdataReferenceValid(pd->peer)) { |
62e76326 | 255 | peerDigestNotePeerGone(pd); |
256 | return; | |
9b7de833 | 257 | } |
62e76326 | 258 | |
e13ee7ad | 259 | debug(72, 3) ("peerDigestCheck: peer %s:%d\n", pd->peer->host, pd->peer->http_port); |
38650cc8 | 260 | debug(72, 3) ("peerDigestCheck: time: %ld, last received: %ld (%+d)\n", |
62e76326 | 261 | (long int) squid_curtime, (long int) pd->times.received, (int) (squid_curtime - pd->times.received)); |
e13ee7ad | 262 | |
263 | /* decide when we should send the request: | |
264 | * request now unless too close to other requests */ | |
265 | req_time = squid_curtime; | |
266 | ||
267 | /* per-peer limit */ | |
62e76326 | 268 | |
e13ee7ad | 269 | if (req_time - pd->times.received < PeerDigestReqMinGap) { |
62e76326 | 270 | debug(72, 2) ("peerDigestCheck: %s, avoiding close peer requests (%d < %d secs).\n", |
271 | pd->host.buf(), (int) (req_time - pd->times.received), | |
272 | (int) PeerDigestReqMinGap); | |
273 | req_time = pd->times.received + PeerDigestReqMinGap; | |
bd890734 | 274 | } |
62e76326 | 275 | |
e13ee7ad | 276 | /* global limit */ |
277 | if (req_time - pd_last_req_time < GlobDigestReqMinGap) { | |
62e76326 | 278 | debug(72, 2) ("peerDigestCheck: %s, avoiding close requests (%d < %d secs).\n", |
279 | pd->host.buf(), (int) (req_time - pd_last_req_time), | |
280 | (int) GlobDigestReqMinGap); | |
281 | req_time = pd_last_req_time + GlobDigestReqMinGap; | |
9b7de833 | 282 | } |
62e76326 | 283 | |
e13ee7ad | 284 | if (req_time <= squid_curtime) |
62e76326 | 285 | peerDigestRequest(pd); /* will set pd->flags.requested */ |
e13ee7ad | 286 | else |
62e76326 | 287 | peerDigestSetCheck(pd, req_time - squid_curtime); |
9b7de833 | 288 | } |
289 | ||
2812146b | 290 | CBDATA_TYPE(DigestFetchState); |
291 | ||
e13ee7ad | 292 | /* ask store for a digest */ |
9b7de833 | 293 | static void |
e13ee7ad | 294 | peerDigestRequest(PeerDigest * pd) |
9b7de833 | 295 | { |
e13ee7ad | 296 | peer *p = pd->peer; |
9b7de833 | 297 | StoreEntry *e, *old_e; |
298 | char *url; | |
9d486b43 | 299 | const cache_key *key; |
190154cf | 300 | HttpRequest *req; |
9b7de833 | 301 | DigestFetchState *fetch = NULL; |
528b2c61 | 302 | StoreIOBuffer tempBuffer; |
e13ee7ad | 303 | |
304 | pd->req_result = NULL; | |
305 | pd->flags.requested = 1; | |
306 | ||
9b7de833 | 307 | /* compute future request components */ |
62e76326 | 308 | |
7e3ce7b9 | 309 | if (p->digest_url) |
62e76326 | 310 | url = xstrdup(p->digest_url); |
7e3ce7b9 | 311 | else |
62e76326 | 312 | url = internalRemoteUri(p->host, p->http_port, |
313 | "/squid-internal-periodic/", StoreDigestFileName); | |
7e3ce7b9 | 314 | |
50a97cee | 315 | req = urlParse(METHOD_GET, url); |
62e76326 | 316 | |
e13ee7ad | 317 | assert(req); |
62e76326 | 318 | |
f66a9ef4 | 319 | key = storeKeyPublicByRequest(req); |
62e76326 | 320 | |
f66a9ef4 | 321 | debug(72, 2) ("peerDigestRequest: %s key: %s\n", url, storeKeyText(key)); |
e13ee7ad | 322 | |
9b7de833 | 323 | /* add custom headers */ |
2246b732 | 324 | assert(!req->header.len); |
62e76326 | 325 | |
4820f62b | 326 | httpHeaderPutStr(&req->header, HDR_ACCEPT, StoreDigestMimeStr); |
62e76326 | 327 | |
4820f62b | 328 | httpHeaderPutStr(&req->header, HDR_ACCEPT, "text/html"); |
62e76326 | 329 | |
9bc73deb | 330 | if (p->login) |
62e76326 | 331 | xstrncpy(req->login, p->login, MAX_LOGIN_SZ); |
332 | ||
9b7de833 | 333 | /* create fetch state structure */ |
2812146b | 334 | CBDATA_INIT_TYPE(DigestFetchState); |
62e76326 | 335 | |
72711e31 | 336 | fetch = cbdataAlloc(DigestFetchState); |
62e76326 | 337 | |
903c39e4 | 338 | fetch->request = requestLink(req); |
62e76326 | 339 | |
fa80a8ef | 340 | fetch->pd = cbdataReference(pd); |
62e76326 | 341 | |
e13ee7ad | 342 | fetch->offset = 0; |
62e76326 | 343 | |
add2192d | 344 | fetch->state = DIGEST_READ_REPLY; |
e13ee7ad | 345 | |
346 | /* update timestamps */ | |
9b7de833 | 347 | fetch->start_time = squid_curtime; |
62e76326 | 348 | |
e13ee7ad | 349 | pd->times.requested = squid_curtime; |
62e76326 | 350 | |
e13ee7ad | 351 | pd_last_req_time = squid_curtime; |
352 | ||
92695e5e | 353 | req->flags.cachable = 1; |
62e76326 | 354 | |
9b7de833 | 355 | /* the rest is based on clientProcessExpired() */ |
92695e5e | 356 | req->flags.refresh = 1; |
62e76326 | 357 | |
c8f4eac4 | 358 | old_e = fetch->old_entry = Store::Root().get(key); |
62e76326 | 359 | |
9b7de833 | 360 | if (old_e) { |
62e76326 | 361 | debug(72, 5) ("peerDigestRequest: found old entry\n"); |
362 | storeLockObject(old_e); | |
363 | storeCreateMemObject(old_e, url, url); | |
364 | fetch->old_sc = storeClientListAdd(old_e, fetch); | |
9b7de833 | 365 | } |
62e76326 | 366 | |
9b7de833 | 367 | e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method); |
e13ee7ad | 368 | assert(EBIT_TEST(e->flags, KEY_PRIVATE)); |
06d2839d | 369 | fetch->sc = storeClientListAdd(e, fetch); |
9b7de833 | 370 | /* set lastmod to trigger IMS request if possible */ |
62e76326 | 371 | |
9b7de833 | 372 | if (old_e) |
62e76326 | 373 | e->lastmod = old_e->lastmod; |
e13ee7ad | 374 | |
9b7de833 | 375 | /* push towards peer cache */ |
e13ee7ad | 376 | debug(72, 3) ("peerDigestRequest: forwarding to fwdStart...\n"); |
62e76326 | 377 | |
7e3ce7b9 | 378 | fwdStart(-1, e, req); |
62e76326 | 379 | |
598465f1 | 380 | tempBuffer.offset = 0; |
62e76326 | 381 | |
f95db407 | 382 | tempBuffer.length = SM_PAGE_SIZE; |
62e76326 | 383 | |
598465f1 | 384 | tempBuffer.data = fetch->buf; |
62e76326 | 385 | |
598465f1 | 386 | storeClientCopy(fetch->sc, e, tempBuffer, |
62e76326 | 387 | peerDigestHandleReply, fetch); |
9b7de833 | 388 | } |
389 | ||
add2192d | 390 | |
391 | /* Handle the data copying .. */ | |
392 | ||
393 | /* | |
394 | * This routine handles the copy data and then redirects the | |
395 | * copy to a bunch of subfunctions depending upon the copy state. | |
396 | * It also tracks the buffer offset and "seen", since I'm actually | |
397 | * not interested in rewriting everything to suit my little idea. | |
398 | */ | |
9b7de833 | 399 | static void |
598465f1 | 400 | peerDigestHandleReply(void *data, StoreIOBuffer recievedData) |
add2192d | 401 | { |
e6ccf245 | 402 | DigestFetchState *fetch = (DigestFetchState *)data; |
add2192d | 403 | int retsize = -1; |
404 | digest_read_state_t prevstate; | |
405 | int newsize; | |
406 | ||
e4a67a80 | 407 | assert(fetch->pd && recievedData.data); |
598465f1 | 408 | /* The existing code assumes that the recieved pointer is |
409 | * where we asked the data to be put | |
410 | */ | |
411 | assert(fetch->buf + fetch->bufofs == recievedData.data); | |
add2192d | 412 | |
413 | /* Update the buffer size */ | |
598465f1 | 414 | fetch->bufofs += recievedData.length; |
add2192d | 415 | |
416 | assert(fetch->bufofs <= SM_PAGE_SIZE); | |
417 | ||
418 | /* If we've fetched enough, return */ | |
62e76326 | 419 | |
add2192d | 420 | if (peerDigestFetchedEnough(fetch, fetch->buf, fetch->bufofs, "peerDigestHandleReply")) |
62e76326 | 421 | return; |
add2192d | 422 | |
423 | /* Call the right function based on the state */ | |
424 | /* (Those functions will update the state if needed) */ | |
fa80a8ef | 425 | |
e2848e94 | 426 | /* Give us a temporary reference. Some of the calls we make may |
427 | * try to destroy the fetch structure, and we like to know if they | |
428 | * do | |
bac6d4bd | 429 | */ |
e2848e94 | 430 | fetch = cbdataReference(fetch); |
add2192d | 431 | |
432 | /* Repeat this loop until we're out of data OR the state changes */ | |
433 | /* (So keep going if the state has changed and we still have data */ | |
434 | do { | |
62e76326 | 435 | prevstate = fetch->state; |
436 | ||
437 | switch (fetch->state) { | |
438 | ||
439 | case DIGEST_READ_REPLY: | |
440 | retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs); | |
441 | break; | |
442 | ||
443 | case DIGEST_READ_HEADERS: | |
444 | retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs); | |
445 | break; | |
446 | ||
447 | case DIGEST_READ_CBLOCK: | |
448 | retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs); | |
449 | break; | |
450 | ||
451 | case DIGEST_READ_MASK: | |
452 | retsize = peerDigestSwapInMask(fetch, fetch->buf, fetch->bufofs); | |
453 | break; | |
454 | ||
455 | case DIGEST_READ_NONE: | |
456 | break; | |
457 | ||
458 | case DIGEST_READ_DONE: | |
459 | goto finish; | |
460 | break; | |
461 | ||
462 | default: | |
463 | fatal("Bad digest transfer mode!\n"); | |
464 | } | |
465 | ||
466 | if (retsize < 0) | |
467 | goto finish; | |
468 | ||
469 | /* | |
470 | * The returned size indicates how much of the buffer was read - | |
471 | * so move the remainder of the buffer to the beginning | |
472 | * and update the bufofs / bufsize | |
473 | */ | |
474 | newsize = fetch->bufofs - retsize; | |
475 | ||
476 | xmemmove(fetch->buf, fetch->buf + retsize, fetch->bufofs - newsize); | |
477 | ||
478 | fetch->bufofs = newsize; | |
add2192d | 479 | |
e2848e94 | 480 | } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0); |
add2192d | 481 | |
482 | /* Update the copy offset */ | |
598465f1 | 483 | fetch->offset += recievedData.length; |
add2192d | 484 | |
485 | /* Schedule another copy */ | |
fa80a8ef | 486 | if (cbdataReferenceValid(fetch)) { |
62e76326 | 487 | StoreIOBuffer tempBuffer; |
488 | tempBuffer.offset = fetch->offset; | |
489 | tempBuffer.length = SM_PAGE_SIZE - fetch->bufofs; | |
490 | tempBuffer.data = fetch->buf + fetch->bufofs; | |
491 | storeClientCopy(fetch->sc, fetch->entry, tempBuffer, | |
492 | peerDigestHandleReply, fetch); | |
add2192d | 493 | } |
62e76326 | 494 | |
495 | finish: | |
e2848e94 | 496 | /* Get rid of our reference, we've finished with it for now */ |
497 | cbdataReferenceDone(fetch); | |
add2192d | 498 | } |
499 | ||
500 | ||
501 | ||
502 | /* wait for full http headers to be received then parse them */ | |
503 | /* | |
504 | * This routine handles parsing the reply line. | |
505 | * If the reply line indicates an OK, the same data is thrown | |
506 | * to SwapInHeaders(). If the reply line is a NOT_MODIFIED, | |
507 | * we simply stop parsing. | |
508 | */ | |
509 | static int | |
9b7de833 | 510 | peerDigestFetchReply(void *data, char *buf, ssize_t size) |
511 | { | |
e6ccf245 | 512 | DigestFetchState *fetch = (DigestFetchState *)data; |
e13ee7ad | 513 | PeerDigest *pd = fetch->pd; |
9bc73deb | 514 | size_t hdr_size; |
e13ee7ad | 515 | assert(pd && buf); |
9b7de833 | 516 | assert(!fetch->offset); |
e13ee7ad | 517 | |
add2192d | 518 | assert(fetch->state == DIGEST_READ_REPLY); |
62e76326 | 519 | |
9b7de833 | 520 | if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply")) |
62e76326 | 521 | return -1; |
e13ee7ad | 522 | |
9bc73deb | 523 | if ((hdr_size = headersEnd(buf, size))) { |
62e76326 | 524 | http_status status; |
525 | HttpReply const *reply = fetch->entry->getReply(); | |
526 | assert(reply); | |
527 | assert (reply->sline.status != 0); | |
528 | status = reply->sline.status; | |
529 | debug(72, 3) ("peerDigestFetchReply: %s status: %d, expires: %ld (%+d)\n", | |
530 | pd->host.buf(), status, | |
531 | (long int) reply->expires, (int) (reply->expires - squid_curtime)); | |
532 | ||
533 | /* this "if" is based on clientHandleIMSReply() */ | |
534 | ||
535 | if (status == HTTP_NOT_MODIFIED) { | |
190154cf | 536 | HttpRequest *r = NULL; |
62e76326 | 537 | /* our old entry is fine */ |
538 | assert(fetch->old_entry); | |
539 | ||
540 | if (!fetch->old_entry->mem_obj->request) | |
541 | fetch->old_entry->mem_obj->request = r = | |
542 | requestLink(fetch->entry->mem_obj->request); | |
543 | ||
544 | assert(fetch->old_entry->mem_obj->request); | |
545 | ||
07947ad8 | 546 | HttpReply *old_rep = (HttpReply *) fetch->old_entry->getReply(); |
547 | ||
06a5ae20 | 548 | old_rep->updateOnNotModified(reply); |
62e76326 | 549 | |
550 | storeTimestampsSet(fetch->old_entry); | |
551 | ||
552 | /* get rid of 304 reply */ | |
553 | storeUnregister(fetch->sc, fetch->entry, fetch); | |
554 | ||
555 | storeUnlockObject(fetch->entry); | |
556 | ||
557 | fetch->entry = fetch->old_entry; | |
558 | ||
559 | fetch->old_entry = NULL; | |
560 | ||
561 | /* preserve request -- we need its size to update counters */ | |
562 | /* requestUnlink(r); */ | |
563 | /* fetch->entry->mem_obj->request = NULL; */ | |
564 | } else if (status == HTTP_OK) { | |
565 | /* get rid of old entry if any */ | |
566 | ||
567 | if (fetch->old_entry) { | |
568 | debug(72, 3) ("peerDigestFetchReply: got new digest, releasing old one\n"); | |
569 | storeUnregister(fetch->old_sc, fetch->old_entry, fetch); | |
570 | storeReleaseRequest(fetch->old_entry); | |
571 | storeUnlockObject(fetch->old_entry); | |
572 | fetch->old_entry = NULL; | |
573 | } | |
574 | } else { | |
575 | /* some kind of a bug */ | |
576 | peerDigestFetchAbort(fetch, buf, httpStatusLineReason(&reply->sline)); | |
577 | return -1; /* XXX -1 will abort stuff in ReadReply! */ | |
578 | } | |
579 | ||
580 | /* must have a ready-to-use store entry if we got here */ | |
581 | /* can we stay with the old in-memory digest? */ | |
582 | if (status == HTTP_NOT_MODIFIED && fetch->pd->cd) { | |
583 | peerDigestFetchStop(fetch, buf, "Not modified"); | |
584 | fetch->state = DIGEST_READ_DONE; | |
585 | } else { | |
586 | fetch->state = DIGEST_READ_HEADERS; | |
587 | } | |
9b7de833 | 588 | } else { |
62e76326 | 589 | /* need more data, do we have space? */ |
590 | ||
591 | if (size >= SM_PAGE_SIZE) | |
592 | peerDigestFetchAbort(fetch, buf, "reply header too big"); | |
9b7de833 | 593 | } |
add2192d | 594 | |
595 | /* We don't want to actually ack that we've handled anything, | |
596 | * otherwise SwapInHeaders() won't get the reply line .. */ | |
597 | return 0; | |
9b7de833 | 598 | } |
599 | ||
600 | /* fetch headers from disk, pass on to SwapInCBlock */ | |
1f140227 | 601 | int |
9b7de833 | 602 | peerDigestSwapInHeaders(void *data, char *buf, ssize_t size) |
603 | { | |
e6ccf245 | 604 | DigestFetchState *fetch = (DigestFetchState *)data; |
9b7de833 | 605 | size_t hdr_size; |
e13ee7ad | 606 | |
add2192d | 607 | assert(fetch->state == DIGEST_READ_HEADERS); |
62e76326 | 608 | |
9b7de833 | 609 | if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders")) |
62e76326 | 610 | return -1; |
e13ee7ad | 611 | |
9d486b43 | 612 | assert(!fetch->offset); |
62e76326 | 613 | |
9b7de833 | 614 | if ((hdr_size = headersEnd(buf, size))) { |
62e76326 | 615 | assert(fetch->entry->getReply()); |
616 | assert (fetch->entry->getReply()->sline.status != 0); | |
617 | ||
618 | if (fetch->entry->getReply()->sline.status != HTTP_OK) { | |
619 | debug(72, 1) ("peerDigestSwapInHeaders: %s status %d got cached!\n", | |
620 | fetch->pd->host.buf(), fetch->entry->getReply()->sline.status); | |
621 | peerDigestFetchAbort(fetch, buf, "internal status error"); | |
622 | return -1; | |
623 | } | |
624 | ||
625 | fetch->state = DIGEST_READ_CBLOCK; | |
626 | return hdr_size; /* Say how much data we read */ | |
9b7de833 | 627 | } else { |
62e76326 | 628 | /* need more data, do we have space? */ |
629 | ||
630 | if (size >= SM_PAGE_SIZE) { | |
631 | peerDigestFetchAbort(fetch, buf, "stored header too big"); | |
632 | return -1; | |
633 | } else { | |
634 | return 0; /* We need to read more to parse .. */ | |
635 | } | |
9b7de833 | 636 | } |
62e76326 | 637 | |
add2192d | 638 | fatal("peerDigestSwapInHeaders() - shouldn't get here!\n"); |
9b7de833 | 639 | } |
640 | ||
1f140227 | 641 | int |
9b7de833 | 642 | peerDigestSwapInCBlock(void *data, char *buf, ssize_t size) |
643 | { | |
e6ccf245 | 644 | DigestFetchState *fetch = (DigestFetchState *)data; |
e13ee7ad | 645 | |
add2192d | 646 | assert(fetch->state == DIGEST_READ_CBLOCK); |
62e76326 | 647 | |
9b7de833 | 648 | if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock")) |
62e76326 | 649 | return -1; |
e13ee7ad | 650 | |
e6ccf245 | 651 | if (size >= (ssize_t)StoreDigestCBlockSize) { |
62e76326 | 652 | PeerDigest *pd = fetch->pd; |
62e76326 | 653 | |
e4a67a80 | 654 | assert(pd && fetch->entry->getReply()); |
62e76326 | 655 | |
656 | if (peerDigestSetCBlock(pd, buf)) { | |
657 | /* XXX: soon we will have variable header size */ | |
658 | /* switch to CD buffer and fetch digest guts */ | |
659 | buf = NULL; | |
660 | assert(pd->cd->mask); | |
661 | fetch->state = DIGEST_READ_MASK; | |
662 | return StoreDigestCBlockSize; | |
663 | } else { | |
664 | peerDigestFetchAbort(fetch, buf, "invalid digest cblock"); | |
665 | return -1; | |
666 | } | |
9b7de833 | 667 | } else { |
62e76326 | 668 | /* need more data, do we have space? */ |
669 | ||
670 | if (size >= SM_PAGE_SIZE) { | |
671 | peerDigestFetchAbort(fetch, buf, "digest cblock too big"); | |
672 | return -1; | |
673 | } else { | |
674 | return 0; /* We need more data */ | |
675 | } | |
9b7de833 | 676 | } |
62e76326 | 677 | |
add2192d | 678 | fatal("peerDigestSwapInCBlock(): shouldn't get here!\n"); |
9b7de833 | 679 | } |
680 | ||
1f140227 | 681 | int |
9b7de833 | 682 | peerDigestSwapInMask(void *data, char *buf, ssize_t size) |
683 | { | |
e6ccf245 | 684 | DigestFetchState *fetch = (DigestFetchState *)data; |
e13ee7ad | 685 | PeerDigest *pd; |
686 | ||
e13ee7ad | 687 | pd = fetch->pd; |
688 | assert(pd->cd && pd->cd->mask); | |
9d486b43 | 689 | |
add2192d | 690 | /* |
691 | * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore! | |
692 | * we need to do the copy ourselves! | |
693 | */ | |
694 | xmemcpy(pd->cd->mask + fetch->mask_offset, buf, size); | |
695 | ||
696 | /* NOTE! buf points to the middle of pd->cd->mask! */ | |
62e76326 | 697 | |
add2192d | 698 | if (peerDigestFetchedEnough(fetch, NULL, size, "peerDigestSwapInMask")) |
62e76326 | 699 | return -1; |
add2192d | 700 | |
da407def | 701 | fetch->mask_offset += size; |
62e76326 | 702 | |
e6ccf245 | 703 | if (fetch->mask_offset >= (off_t)pd->cd->mask_size) { |
e4049756 | 704 | debugs(72, 2, "peerDigestSwapInMask: Done! Got " << |
705 | fetch->mask_offset << ", expected " << pd->cd->mask_size); | |
62e76326 | 706 | assert(fetch->mask_offset == (off_t)pd->cd->mask_size); |
707 | assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask")); | |
708 | return -1; /* XXX! */ | |
e13ee7ad | 709 | } else { |
62e76326 | 710 | /* We always read everything, so return so */ |
711 | return size; | |
9b7de833 | 712 | } |
62e76326 | 713 | |
add2192d | 714 | fatal("peerDigestSwapInMask(): shouldn't get here!\n"); |
9b7de833 | 715 | } |
716 | ||
717 | static int | |
4b4cd312 | 718 | peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name) |
9b7de833 | 719 | { |
e13ee7ad | 720 | PeerDigest *pd = NULL; |
8a6218c6 | 721 | const char *host = "<unknown>"; /* peer host */ |
722 | const char *reason = NULL; /* reason for completion */ | |
723 | const char *no_bug = NULL; /* successful completion if set */ | |
e2848e94 | 724 | const int pdcb_valid = cbdataReferenceValid(fetch->pd); |
725 | const int pcb_valid = cbdataReferenceValid(fetch->pd->peer); | |
e13ee7ad | 726 | |
727 | /* test possible exiting conditions (the same for most steps!) | |
728 | * cases marked with '?!' should not happen */ | |
729 | ||
730 | if (!reason) { | |
62e76326 | 731 | if (!(pd = fetch->pd)) |
732 | reason = "peer digest disappeared?!"; | |
733 | ||
bac6d4bd | 734 | #if DONT /* WHY NOT? /HNO */ |
62e76326 | 735 | |
736 | else if (!cbdataReferenceValid(pd)) | |
737 | reason = "invalidated peer digest?!"; | |
738 | ||
5385c86a | 739 | #endif |
62e76326 | 740 | |
741 | else | |
742 | host = pd->host.buf(); | |
e13ee7ad | 743 | } |
62e76326 | 744 | |
e4049756 | 745 | debugs(72, 6, step_name << ": peer " << host << ", offset: " << |
746 | fetch->offset << " size: " << size << "."); | |
e13ee7ad | 747 | |
748 | /* continue checking (with pd and host known and valid) */ | |
62e76326 | 749 | |
e13ee7ad | 750 | if (!reason) { |
62e76326 | 751 | if (!cbdataReferenceValid(pd->peer)) |
752 | reason = "peer disappeared"; | |
753 | else if (size < 0) | |
754 | reason = "swap failure"; | |
755 | else if (!fetch->entry) | |
756 | reason = "swap aborted?!"; | |
757 | else if (EBIT_TEST(fetch->entry->flags, ENTRY_ABORTED)) | |
758 | reason = "swap aborted"; | |
e13ee7ad | 759 | } |
62e76326 | 760 | |
e13ee7ad | 761 | /* continue checking (maybe-successful eof case) */ |
762 | if (!reason && !size) { | |
62e76326 | 763 | if (!pd->cd) |
764 | reason = "null digest?!"; | |
765 | else if (fetch->mask_offset != (off_t)pd->cd->mask_size) | |
766 | reason = "premature end of digest?!"; | |
767 | else if (!peerDigestUseful(pd)) | |
768 | reason = "useless digest"; | |
769 | else | |
770 | reason = no_bug = "success"; | |
e13ee7ad | 771 | } |
62e76326 | 772 | |
e13ee7ad | 773 | /* finish if we have a reason */ |
9b7de833 | 774 | if (reason) { |
62e76326 | 775 | const int level = strstr(reason, "?!") ? 1 : 3; |
776 | debug(72, level) ("%s: peer %s, exiting after '%s'\n", | |
777 | step_name, host, reason); | |
778 | peerDigestReqFinish(fetch, buf, | |
779 | 1, pdcb_valid, pcb_valid, reason, !no_bug); | |
e13ee7ad | 780 | } else { |
62e76326 | 781 | /* paranoid check */ |
782 | assert(pdcb_valid && pcb_valid); | |
e13ee7ad | 783 | } |
62e76326 | 784 | |
e13ee7ad | 785 | return reason != NULL; |
786 | } | |
787 | ||
/* call this when all callback data is valid and fetch must be stopped but
 * no error has occurred (e.g. we received 304 reply and reuse old digest) */
static void
peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debug(72, 2) ("peerDigestFetchStop: peer %s, reason: %s\n",
                  fetch->pd->host.buf(), reason);
    /* fcb/pdcb/pcb all valid (1,1,1); err == 0 so the peer digest is kept
     * and the next check is scheduled with the normal (non-retry) delay */
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 0);
}
798 | ||
/* call this when all callback data is valid but something bad happened */
static void
peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason)
{
    assert(reason);
    debug(72, 2) ("peerDigestFetchAbort: peer %s, reason: %s\n",
                  fetch->pd->host.buf(), reason);
    /* identical to peerDigestFetchStop() except err == 1: the digest is
     * destroyed and a growing retry delay is scheduled downstream */
    peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1);
}
808 | ||
/* complete the digest transfer, update stats, unlock/release everything
 *
 * fcb_valid/pdcb_valid/pcb_valid tell us which cbdata pointers (fetch,
 * peer digest, peer) are still alive; each cleanup step below is guarded
 * by the corresponding flag.  'err' selects between error (retry with
 * growing delay) and success (normal re-check delay) scheduling. */
static void
peerDigestReqFinish(DigestFetchState * fetch, char *buf,
                    int fcb_valid, int pdcb_valid, int pcb_valid,
                    const char *reason, int err)
{
    assert(reason);

    /* must go before peerDigestPDFinish */

    if (pdcb_valid) {
        fetch->pd->flags.requested = 0;
        fetch->pd->req_result = reason;
    }

    /* schedule next check if peer is still out there */
    if (pcb_valid) {
        PeerDigest *pd = fetch->pd;

        if (err) {
            /* back off: each failure increases the retry delay */
            pd->times.retry_delay = peerDigestIncDelay(pd);
            peerDigestSetCheck(pd, pd->times.retry_delay);
        } else {
            /* success resets the back-off; delay derives from the entry's
             * expiration time */
            pd->times.retry_delay = 0;
            peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry));
        }
    }

    /* note: order is significant */
    if (fcb_valid)
        peerDigestFetchSetStats(fetch);

    if (pdcb_valid)
        peerDigestPDFinish(fetch, pcb_valid, err);

    if (fcb_valid)
        peerDigestFetchFinish(fetch, err);
}
847 | ||
e13ee7ad | 848 | |
/* destroys digest if peer disappeared
 * must be called only when fetch and pd cbdata are valid */
static void
peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err)
{
    PeerDigest *pd = fetch->pd;
    const char *host = pd->host.buf();

    /* roll this fetch's traffic totals into the per-peer-digest stats */
    pd->times.received = squid_curtime;
    pd->times.req_delay = fetch->resp_time;
    kb_incr(&pd->stats.sent.kbytes, (size_t) fetch->sent.bytes);
    kb_incr(&pd->stats.recv.kbytes, (size_t) fetch->recv.bytes);
    pd->stats.sent.msgs += fetch->sent.msg;
    pd->stats.recv.msgs += fetch->recv.msg;

    if (err) {
        /* "temporary" if the peer is still around (we will retry later);
         * permanent if the peer itself is gone */
        debug(72, 1) ("%sdisabling (%s) digest from %s\n",
                      pcb_valid ? "temporary " : "",
                      pd->req_result, host);

        if (pd->cd) {
            cacheDigestDestroy(pd->cd);
            pd->cd = NULL;
        }

        pd->flags.usable = 0;

        if (!pcb_valid)
            peerDigestNotePeerGone(pd);
    } else {
        /* success implies the peer cbdata must still be valid */
        assert(pcb_valid);

        pd->flags.usable = 1;

        /* XXX: ugly condition, but how? */

        if (fetch->entry->store_status == STORE_OK)
            debug(72, 2) ("re-used old digest from %s\n", host);
        else
            debug(72, 2) ("received valid digest from %s\n", host);
    }

    /* drop the fetch's reference to the peer digest cbdata */
    cbdataReferenceDone(fetch->pd);
}
893 | ||
/* free fetch state structures
 * must be called only when fetch cbdata is valid
 * note: 'err' is currently unused here; cleanup is the same either way */
static void
peerDigestFetchFinish(DigestFetchState * fetch, int err)
{
    assert(fetch->entry && fetch->request);

    /* release the previous (pre-refresh) store entry, if we kept one */
    if (fetch->old_entry) {
        debug(72, 2) ("peerDigestFetchFinish: deleting old entry\n");
        storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
        storeReleaseRequest(fetch->old_entry);
        storeUnlockObject(fetch->old_entry);
        fetch->old_entry = NULL;
    }

    /* update global stats */
    kb_incr(&statCounter.cd.kbytes_sent, (size_t) fetch->sent.bytes);

    kb_incr(&statCounter.cd.kbytes_recv, (size_t) fetch->recv.bytes);

    statCounter.cd.msgs_sent += fetch->sent.msg;

    statCounter.cd.msgs_recv += fetch->recv.msg;

    /* unlock everything */
    storeUnregister(fetch->sc, fetch->entry, fetch);

    storeUnlockObject(fetch->entry);

    requestUnlink(fetch->request);

    fetch->entry = NULL;

    fetch->request = NULL;

    /* the pd reference must already have been dropped by
     * peerDigestPDFinish (via cbdataReferenceDone) */
    assert(fetch->pd == NULL);

    cbdataFree(fetch);
}
933 | ||
e13ee7ad | 934 | /* calculate fetch stats after completion */ |
935 | static void | |
936 | peerDigestFetchSetStats(DigestFetchState * fetch) | |
937 | { | |
938 | MemObject *mem; | |
939 | assert(fetch->entry && fetch->request); | |
940 | ||
941 | mem = fetch->entry->mem_obj; | |
942 | assert(mem); | |
943 | ||
944 | /* XXX: outgoing numbers are not precise */ | |
945 | /* XXX: we must distinguish between 304 hits and misses here */ | |
946 | fetch->sent.bytes = httpRequestPrefixLen(fetch->request); | |
528b2c61 | 947 | /* XXX: this is slightly wrong: we don't KNOW that the entire memobject |
948 | * was fetched. We only know how big it is | |
949 | */ | |
950 | fetch->recv.bytes = mem->size(); | |
e13ee7ad | 951 | fetch->sent.msg = fetch->recv.msg = 1; |
952 | fetch->expires = fetch->entry->expires; | |
953 | fetch->resp_time = squid_curtime - fetch->start_time; | |
954 | ||
955 | debug(72, 3) ("peerDigestFetchFinish: recv %d bytes in %d secs\n", | |
62e76326 | 956 | fetch->recv.bytes, (int) fetch->resp_time); |
38650cc8 | 957 | debug(72, 3) ("peerDigestFetchFinish: expires: %ld (%+d), lmt: %ld (%+d)\n", |
62e76326 | 958 | (long int) fetch->expires, (int) (fetch->expires - squid_curtime), |
959 | (long int) fetch->entry->lastmod, (int) (fetch->entry->lastmod - squid_curtime)); | |
e13ee7ad | 960 | } |
961 | ||
962 | ||
/* parse and validate the digest control block (cblock) received from a
 * peer, and (re)allocate pd->cd to match its advertised geometry.
 * Returns 1 on success, 0 if the cblock is unusable (wrong version,
 * corrupted, or uses features we do not support). */
static int
peerDigestSetCBlock(PeerDigest * pd, const char *buf)
{
    StoreDigestCBlock cblock;
    int freed_size = 0;
    const char *host = pd->host.buf();

    xmemcpy(&cblock, buf, sizeof(cblock));
    /* network -> host conversions */
    /* NOTE(review): bits_per_entry and hash_func_count are used below
     * without ntoh conversion — presumably single-byte fields in
     * StoreDigestCBlock; confirm against the struct layout */
    cblock.ver.current = ntohs(cblock.ver.current);
    cblock.ver.required = ntohs(cblock.ver.required);
    cblock.capacity = ntohl(cblock.capacity);
    cblock.count = ntohl(cblock.count);
    cblock.del_count = ntohl(cblock.del_count);
    cblock.mask_size = ntohl(cblock.mask_size);
    debug(72, 2) ("got digest cblock from %s; ver: %d (req: %d)\n",
                  host, (int) cblock.ver.current, (int) cblock.ver.required);
    debug(72, 2) ("\t size: %d bytes, e-cnt: %d, e-util: %d%%\n",
                  cblock.mask_size, cblock.count,
                  xpercentInt(cblock.count, cblock.capacity));
    /* check version requirements (both ways) */

    if (cblock.ver.required > CacheDigestVer.current) {
        debug(72, 1) ("%s digest requires version %d; have: %d\n",
                      host, cblock.ver.required, CacheDigestVer.current);
        return 0;
    }

    if (cblock.ver.current < CacheDigestVer.required) {
        debug(72, 1) ("%s digest is version %d; we require: %d\n",
                      host, cblock.ver.current, CacheDigestVer.required);
        return 0;
    }

    /* check consistency */
    if (cblock.ver.required > cblock.ver.current ||
            cblock.mask_size <= 0 || cblock.capacity <= 0 ||
            cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) {
        debug(72, 0) ("%s digest cblock is corrupted.\n", host);
        return 0;
    }

    /* check consistency further: the advertised mask size must match the
     * size implied by capacity and bits-per-entry */
    if ((size_t)cblock.mask_size != cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)) {
        debugs(72, 0, host << " digest cblock is corrupted " <<
               "(mask size mismatch: " << cblock.mask_size << " ? " <<
               cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)
               << ").");
        return 0;
    }

    /* there are some things we cannot do yet */
    if (cblock.hash_func_count != CacheDigestHashFuncCount) {
        debug(72, 0) ("%s digest: unsupported #hash functions: %d ? %d.\n",
                      host, cblock.hash_func_count, CacheDigestHashFuncCount);
        return 0;
    }

    /*
     * no cblock bugs below this point
     */
    /* check size changes: if the peer's mask size changed, drop the old
     * in-memory digest so it gets re-created with the new geometry */
    if (pd->cd && cblock.mask_size != (ssize_t)pd->cd->mask_size) {
        debugs(72, 2, host << " digest changed size: " << cblock.mask_size <<
               " -> " << pd->cd->mask_size);
        freed_size = pd->cd->mask_size;
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    if (!pd->cd) {
        debug(72, 2) ("creating %s digest; size: %d (%+d) bytes\n",
                      host, cblock.mask_size, (int) (cblock.mask_size - freed_size));
        pd->cd = cacheDigestCreate(cblock.capacity, cblock.bits_per_entry);

        /* account only for net memory growth in the global counter */
        if (cblock.mask_size >= freed_size)
            kb_incr(&statCounter.cd.memory, cblock.mask_size - freed_size);
    }

    assert(pd->cd);
    /* these assignments leave us in an inconsistent state until we finish reading the digest */
    pd->cd->count = cblock.count;
    pd->cd->del_count = cblock.del_count;
    return 1;
}
1048 | ||
9b7de833 | 1049 | static int |
8a6218c6 | 1050 | peerDigestUseful(const PeerDigest * pd) |
9b7de833 | 1051 | { |
d1cdaa16 | 1052 | /* TODO: we should calculate the prob of a false hit instead of bit util */ |
e13ee7ad | 1053 | const int bit_util = cacheDigestBitUtil(pd->cd); |
62e76326 | 1054 | |
e13ee7ad | 1055 | if (bit_util > 65) { |
62e76326 | 1056 | debug(72, 0) ("Warning: %s peer digest has too many bits on (%d%%).\n", |
1057 | pd->host.buf(), bit_util); | |
1058 | return 0; | |
9b7de833 | 1059 | } |
62e76326 | 1060 | |
9b7de833 | 1061 | return 1; |
1062 | } | |
7f6eb0fe | 1063 | |
e13ee7ad | 1064 | static int |
1065 | saneDiff(time_t diff) | |
1066 | { | |
8a6218c6 | 1067 | return abs(diff) > squid_curtime / 2 ? 0 : diff; |
e13ee7ad | 1068 | } |
1069 | ||
/* append a human-readable report on one peer digest (timestamps, state
 * flags, traffic totals, and the in-memory digest structure) to the
 * given cache-manager StoreEntry */
void
peerDigestStatsReport(const PeerDigest * pd, StoreEntry * e)
{
/* render a flag as yes/no */
#define f2s(flag) (pd->flags.flag ? "yes" : "no")
/* one row per event: absolute time, offset from now, offset from init;
 * saneDiff() suppresses nonsense offsets from unset (0) timestamps */
#define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
    ""#tm, (long int)pd->times.tm, \
    saneDiff(pd->times.tm - squid_curtime), \
    saneDiff(pd->times.tm - pd->times.initialized))

    assert(pd);

    const char *host = pd->host.buf();
    storeAppendPrintf(e, "\npeer digest from %s\n", host);

    cacheDigestGuessStatsReport(&pd->stats.guess, e, host);

    storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n");
    appendTime(initialized);
    appendTime(needed);
    appendTime(requested);
    appendTime(received);
    appendTime(next_check);

    storeAppendPrintf(e, "peer digest state:\n");
    storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n",
                      f2s(needed), f2s(usable), f2s(requested));
    storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n",
                      (int) pd->times.retry_delay);
    storeAppendPrintf(e, "\tlast request response time: %d secs\n",
                      (int) pd->times.req_delay);
    storeAppendPrintf(e, "\tlast request result: %s\n",
                      pd->req_result ? pd->req_result : "(none)");

    storeAppendPrintf(e, "\npeer digest traffic:\n");
    storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n",
                      pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb);
    storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n",
                      pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb);

    storeAppendPrintf(e, "\npeer digest structure:\n");

    if (pd->cd)
        cacheDigestReport(pd->cd, host, e);
    else
        storeAppendPrintf(e, "\tno in-memory copy\n");
}
1116 | ||
7f6eb0fe | 1117 | #endif |