]>
Commit | Line | Data |
---|---|---|
9b7de833 | 1 | |
2 | /* | |
7bd7a9ee | 3 | * $Id: peer_digest.cc,v 1.59 1998/11/14 18:46:26 rousskov Exp $ |
9b7de833 | 4 | * |
5 | * DEBUG: section 72 Peer Digest Routines | |
6 | * AUTHOR: Alex Rousskov | |
7 | * | |
8 | * SQUID Internet Object Cache http://squid.nlanr.net/Squid/ | |
e25c139f | 9 | * ---------------------------------------------------------- |
9b7de833 | 10 | * |
11 | * Squid is the result of efforts by numerous individuals from the | |
12 | * Internet community. Development is led by Duane Wessels of the | |
e25c139f | 13 | * National Laboratory for Applied Network Research and funded by the |
14 | * National Science Foundation. Squid is Copyrighted (C) 1998 by | |
15 | * Duane Wessels and the University of California San Diego. Please | |
16 | * see the COPYRIGHT file for full details. Squid incorporates | |
17 | * software developed and/or copyrighted by other sources. Please see | |
18 | * the CREDITS file for full details. | |
9b7de833 | 19 | * |
20 | * This program is free software; you can redistribute it and/or modify | |
21 | * it under the terms of the GNU General Public License as published by | |
22 | * the Free Software Foundation; either version 2 of the License, or | |
23 | * (at your option) any later version. | |
24 | * | |
25 | * This program is distributed in the hope that it will be useful, | |
26 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
27 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
28 | * GNU General Public License for more details. | |
29 | * | |
30 | * You should have received a copy of the GNU General Public License | |
31 | * along with this program; if not, write to the Free Software | |
cbdec147 | 32 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111, USA. |
e25c139f | 33 | * |
9b7de833 | 34 | */ |
35 | ||
36 | #include "squid.h" | |
37 | ||
6cfa8966 | 38 | #if USE_CACHE_DIGESTS |
7a2e5bb5 | 39 | |
9b7de833 | 40 | /* local types */ |
41 | ||
42 | /* local prototypes */ | |
e13ee7ad | 43 | static time_t peerDigestIncDelay(const PeerDigest * pd); |
44 | static time_t peerDigestNewDelay(const StoreEntry * e); | |
45 | static void peerDigestSetCheck(PeerDigest * pd, time_t delay); | |
46 | static EVH peerDigestCheck; | |
47 | static void peerDigestRequest(PeerDigest * pd); | |
c68e9c6b | 48 | static STCB peerDigestFetchReply; |
c68e9c6b | 49 | static STCB peerDigestSwapInHeaders; |
50 | static STCB peerDigestSwapInCBlock; | |
da407def | 51 | static STCB peerDigestSwapInMask; |
4b4cd312 | 52 | static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name); |
e13ee7ad | 53 | static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason); |
54 | static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err); | |
55 | static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err); | |
56 | static void peerDigestFetchFinish(DigestFetchState * fetch, int err); | |
57 | static void peerDigestFetchSetStats(DigestFetchState * fetch); | |
58 | static int peerDigestSetCBlock(PeerDigest * pd, const char *buf); | |
59 | static int peerDigestUseful(const PeerDigest * pd); | |
395b813e | 60 | |
9b7de833 | 61 | |
62 | /* local constants */ | |
9d486b43 | 63 | |
9b7de833 | 64 | #define StoreDigestCBlockSize sizeof(StoreDigestCBlock) |
65 | ||
e13ee7ad | 66 | /* min interval for requesting digests from a given peer */ |
67 | static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */ | |
68 | /* min interval for requesting digests (cumulative request stream) */ | |
69 | static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */ | |
bd890734 | 70 | |
71 | /* local vars */ | |
9d486b43 | 72 | |
e13ee7ad | 73 | static time_t pd_last_req_time = 0; /* last call to Check */ |
9b7de833 | 74 | |
e13ee7ad | 75 | /* initialize peer digest */ |
76 | static void | |
77 | peerDigestInit(PeerDigest *pd, peer *p) | |
9b7de833 | 78 | { |
e13ee7ad | 79 | assert(pd && p); |
80 | ||
81 | memset(pd, 0, sizeof(*pd)); | |
82 | pd->peer = p; | |
83 | /* if peer disappears, we will know it's name */ | |
84 | stringInit(&pd->host, p->host); | |
85 | ||
86 | pd->times.initialized = squid_curtime; | |
9b7de833 | 87 | } |
88 | ||
4b4cd312 | 89 | static void |
e13ee7ad | 90 | peerDigestClean(PeerDigest *pd) |
9b7de833 | 91 | { |
e13ee7ad | 92 | assert(pd); |
93 | if (pd->cd) | |
94 | cacheDigestDestroy(pd->cd); | |
95 | stringClean(&pd->host); | |
9b7de833 | 96 | } |
97 | ||
e13ee7ad | 98 | /* allocate new peer digest, call Init, and lock everything */ |
99 | PeerDigest * | |
100 | peerDigestCreate(peer *p) | |
101 | { | |
102 | PeerDigest *pd; | |
103 | assert(p); | |
104 | /* cannot check cbdataValid(p) because p may not be locked yet */ | |
105 | ||
106 | pd = memAllocate(MEM_PEER_DIGEST); | |
107 | cbdataAdd(pd, MEM_PEER_DIGEST); | |
108 | peerDigestInit(pd, p); | |
109 | cbdataLock(pd->peer); /* we will use the peer */ | |
110 | ||
111 | return pd; | |
112 | } | |
113 | ||
114 | /* call Clean and free/unlock everything */ | |
115 | void | |
116 | peerDigestDestroy(PeerDigest *pd) | |
117 | { | |
118 | assert(pd); | |
119 | assert(cbdataValid(pd)); | |
120 | ||
121 | /* inform peer (if any) that we are gone */ | |
122 | if (cbdataValid(pd->peer)) | |
123 | peerNoteDigestGone(pd->peer); | |
124 | cbdataUnlock(pd->peer); /* must unlock, valid or not */ | |
125 | pd->peer = NULL; | |
126 | ||
127 | peerDigestClean(pd); | |
128 | cbdataFree(pd); | |
129 | } | |
130 | ||
131 | /* called by peer to indicate that somebody actually needs this digest */ | |
132 | void | |
133 | peerDigestNeeded(PeerDigest *pd) | |
134 | { | |
135 | assert(pd); | |
136 | assert(!pd->flags.needed); | |
137 | assert(!pd->cd); | |
138 | ||
139 | pd->flags.needed = 1; | |
140 | pd->times.needed = squid_curtime; | |
141 | peerDigestSetCheck(pd, 0); /* check asap */ | |
142 | } | |
143 | ||
/* currently we do not have a reason to disable without destroying */
#if FUTURE_CODE
/* disables peer for good */
static void
peerDigestDisable(PeerDigest * pd)
{
    debug(72, 2) ("peerDigestDisable: peer %s disabled for good\n",
	strBuf(pd->host));
    pd->times.disabled = squid_curtime;
    pd->times.next_check = -1;	/* never */
    pd->flags.usable = 0;

    /* drop the in-memory digest, if any */
    if (pd->cd) {
	cacheDigestDestroy(pd->cd);
	pd->cd = NULL;
    }
    /* we do not destroy the pd itself to preserve its "history" and stats */
}
#endif
395b813e | 163 | |
e13ee7ad | 164 | /* increment retry delay [after an unsuccessful attempt] */ |
395b813e | 165 | static time_t |
e13ee7ad | 166 | peerDigestIncDelay(const PeerDigest *pd) |
395b813e | 167 | { |
e13ee7ad | 168 | assert(pd); |
169 | return pd->times.retry_delay > 0 ? | |
170 | 2*pd->times.retry_delay : /* exponential backoff */ | |
171 | PeerDigestReqMinGap; /* minimal delay */ | |
395b813e | 172 | } |
173 | ||
e13ee7ad | 174 | /* artificially increases Expires: setting to avoid race conditions |
175 | * returns the delay till that [increased] expiration time */ | |
00485c29 | 176 | static time_t |
e13ee7ad | 177 | peerDigestNewDelay(const StoreEntry * e) |
00485c29 | 178 | { |
e13ee7ad | 179 | assert(e); |
00485c29 | 180 | if (e->expires > 0) |
e13ee7ad | 181 | return e->expires + PeerDigestReqMinGap - squid_curtime; |
182 | return PeerDigestReqMinGap; | |
00485c29 | 183 | } |
184 | ||
e13ee7ad | 185 | /* registers next digest verification */ |
395b813e | 186 | static void |
e13ee7ad | 187 | peerDigestSetCheck(PeerDigest * pd, time_t delay) |
395b813e | 188 | { |
e13ee7ad | 189 | cbdataLock(pd); |
190 | eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1); | |
191 | pd->times.next_check = squid_curtime + delay; | |
192 | debug(72, 3) ("peerDigestSetCheck: will check peer %s in %d secs\n", | |
193 | strBuf(pd->host), delay); | |
194 | } | |
195 | ||
196 | /* called only when cbdataValid(pd) and | |
197 | * peer is about to disappear or have already disappeared */ | |
198 | void | |
199 | peerDigestNotePeerGone(PeerDigest *pd) { | |
200 | assert(cbdataValid(pd)); | |
201 | if (pd->flags.requested) { | |
202 | debug(72, 2) ("peerDigest: peer %s is gone, will destroy after fetch.\n", strBuf(pd->host)); | |
203 | /* do nothing now, the fetching chain will notice and take action */ | |
395b813e | 204 | } else { |
e13ee7ad | 205 | debug(72, 2) ("peerDigest: peer %s is gone, destroying now.\n", strBuf(pd->host)); |
206 | peerDigestDestroy(pd); | |
395b813e | 207 | } |
208 | } | |
209 | ||
e13ee7ad | 210 | /* callback for eventAdd() (with peer digest locked) |
211 | * request new digest if our copy is too old or if we lack one; | |
212 | * schedule next check otherwise */ | |
9b7de833 | 213 | static void |
e13ee7ad | 214 | peerDigestCheck(void *data) |
9b7de833 | 215 | { |
e13ee7ad | 216 | PeerDigest *pd = data; |
217 | time_t req_time; | |
218 | ||
219 | assert(pd); | |
220 | ||
221 | if (!cbdataValid(pd)) { | |
222 | cbdataUnlock(pd); | |
9b7de833 | 223 | return; |
224 | } | |
e13ee7ad | 225 | cbdataUnlock(pd); /* non-blocking event is over */ |
226 | ||
227 | assert(!pd->flags.requested); | |
228 | pd->times.next_check = 0; /* unknown */ | |
229 | ||
230 | if (!cbdataValid(pd->peer)) { | |
231 | peerDigestNotePeerGone(pd); | |
232 | return; | |
9b7de833 | 233 | } |
e13ee7ad | 234 | |
235 | debug(72, 3) ("peerDigestCheck: peer %s:%d\n", pd->peer->host, pd->peer->http_port); | |
236 | debug(72, 3) ("peerDigestCheck: time: %d, last received: %d (%+d)\n", | |
237 | squid_curtime, pd->times.received, (squid_curtime-pd->times.received)); | |
238 | ||
239 | /* decide when we should send the request: | |
240 | * request now unless too close to other requests */ | |
241 | req_time = squid_curtime; | |
242 | ||
243 | /* per-peer limit */ | |
244 | if (req_time - pd->times.received < PeerDigestReqMinGap) { | |
245 | debug(72, 2) ("peerDigestCheck: %s, avoiding close peer requests (%d < %d secs).\n", | |
246 | strBuf(pd->host), req_time - pd->times.received, | |
247 | PeerDigestReqMinGap); | |
248 | req_time = pd->times.received + PeerDigestReqMinGap; | |
bd890734 | 249 | } |
e13ee7ad | 250 | /* global limit */ |
251 | if (req_time - pd_last_req_time < GlobDigestReqMinGap) { | |
252 | debug(72, 2) ("peerDigestCheck: %s, avoiding close requests (%d < %d secs).\n", | |
253 | strBuf(pd->host), req_time - pd_last_req_time, | |
254 | GlobDigestReqMinGap); | |
255 | req_time = pd_last_req_time + GlobDigestReqMinGap; | |
9b7de833 | 256 | } |
e13ee7ad | 257 | |
258 | if (req_time <= squid_curtime) | |
259 | peerDigestRequest(pd); /* will set pd->flags.requested */ | |
260 | else | |
261 | peerDigestSetCheck(pd, req_time - squid_curtime); | |
9b7de833 | 262 | } |
263 | ||
e13ee7ad | 264 | /* ask store for a digest */ |
9b7de833 | 265 | static void |
e13ee7ad | 266 | peerDigestRequest(PeerDigest * pd) |
9b7de833 | 267 | { |
e13ee7ad | 268 | peer *p = pd->peer; |
9b7de833 | 269 | StoreEntry *e, *old_e; |
270 | char *url; | |
9d486b43 | 271 | const cache_key *key; |
9b7de833 | 272 | request_t *req; |
273 | DigestFetchState *fetch = NULL; | |
e13ee7ad | 274 | |
275 | pd->req_result = NULL; | |
276 | pd->flags.requested = 1; | |
277 | ||
9b7de833 | 278 | /* compute future request components */ |
e13ee7ad | 279 | url = internalRemoteUri(p->host, p->http_port, "/squid-internal-periodic/", StoreDigestFileName); |
9d486b43 | 280 | key = storeKeyPublic(url, METHOD_GET); |
281 | debug(72, 2) ("peerDigestRequest: %s key: %s\n", url, storeKeyText(key)); | |
50a97cee | 282 | req = urlParse(METHOD_GET, url); |
e13ee7ad | 283 | assert(req); |
284 | ||
9b7de833 | 285 | /* add custom headers */ |
2246b732 | 286 | assert(!req->header.len); |
4820f62b | 287 | httpHeaderPutStr(&req->header, HDR_ACCEPT, StoreDigestMimeStr); |
288 | httpHeaderPutStr(&req->header, HDR_ACCEPT, "text/html"); | |
e13ee7ad | 289 | |
9b7de833 | 290 | /* create fetch state structure */ |
291 | fetch = memAllocate(MEM_DIGEST_FETCH_STATE); | |
292 | cbdataAdd(fetch, MEM_DIGEST_FETCH_STATE); | |
903c39e4 | 293 | fetch->request = requestLink(req); |
e13ee7ad | 294 | fetch->pd = pd; |
295 | fetch->offset = 0; | |
296 | ||
297 | /* update timestamps */ | |
9b7de833 | 298 | fetch->start_time = squid_curtime; |
e13ee7ad | 299 | pd->times.requested = squid_curtime; |
300 | pd_last_req_time = squid_curtime; | |
301 | ||
92695e5e | 302 | req->flags.cachable = 1; |
9b7de833 | 303 | /* the rest is based on clientProcessExpired() */ |
92695e5e | 304 | req->flags.refresh = 1; |
9d486b43 | 305 | old_e = fetch->old_entry = storeGet(key); |
9b7de833 | 306 | if (old_e) { |
4b4cd312 | 307 | debug(72, 5) ("peerDigestRequest: found old entry\n"); |
9b7de833 | 308 | storeLockObject(old_e); |
309 | storeCreateMemObject(old_e, url, url); | |
310 | storeClientListAdd(old_e, fetch); | |
311 | } | |
312 | e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method); | |
e13ee7ad | 313 | assert(EBIT_TEST(e->flags, KEY_PRIVATE)); |
9b7de833 | 314 | storeClientListAdd(e, fetch); |
315 | /* set lastmod to trigger IMS request if possible */ | |
316 | if (old_e) | |
317 | e->lastmod = old_e->lastmod; | |
e13ee7ad | 318 | |
9b7de833 | 319 | /* push towards peer cache */ |
e13ee7ad | 320 | debug(72, 3) ("peerDigestRequest: forwarding to fwdStart...\n"); |
5843eb62 | 321 | fwdStart(-1, e, req, no_addr); |
e13ee7ad | 322 | cbdataLock(fetch); |
323 | cbdataLock(fetch->pd); | |
324 | storeClientCopy(e, 0, 0, 4096, memAllocate(MEM_4K_BUF), | |
9b7de833 | 325 | peerDigestFetchReply, fetch); |
326 | } | |
327 | ||
e13ee7ad | 328 | /* wait for full http headers to be received then parse them */ |
9b7de833 | 329 | static void |
330 | peerDigestFetchReply(void *data, char *buf, ssize_t size) | |
331 | { | |
332 | DigestFetchState *fetch = data; | |
e13ee7ad | 333 | PeerDigest *pd = fetch->pd; |
334 | assert(pd && buf); | |
9b7de833 | 335 | assert(!fetch->offset); |
e13ee7ad | 336 | |
9b7de833 | 337 | if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply")) |
338 | return; | |
e13ee7ad | 339 | |
9b7de833 | 340 | if (headersEnd(buf, size)) { |
341 | http_status status; | |
342 | HttpReply *reply = fetch->entry->mem_obj->reply; | |
343 | assert(reply); | |
344 | httpReplyParse(reply, buf); | |
345 | status = reply->sline.status; | |
e13ee7ad | 346 | debug(72, 3) ("peerDigestFetchReply: %s status: %d, expires: %d (%+d)\n", |
347 | strBuf(pd->host), status, | |
348 | reply->expires, reply->expires-squid_curtime); | |
349 | ||
9b7de833 | 350 | /* this "if" is based on clientHandleIMSReply() */ |
351 | if (status == HTTP_NOT_MODIFIED) { | |
352 | request_t *r = NULL; | |
353 | /* our old entry is fine */ | |
354 | assert(fetch->old_entry); | |
355 | if (!fetch->old_entry->mem_obj->request) | |
356 | fetch->old_entry->mem_obj->request = r = | |
2920225f | 357 | requestLink(fetch->entry->mem_obj->request); |
358 | assert(fetch->old_entry->mem_obj->request); | |
9b7de833 | 359 | httpReplyUpdateOnNotModified(fetch->old_entry->mem_obj->reply, reply); |
360 | storeTimestampsSet(fetch->old_entry); | |
361 | /* get rid of 304 reply */ | |
362 | storeUnregister(fetch->entry, fetch); | |
9b7de833 | 363 | storeUnlockObject(fetch->entry); |
364 | fetch->entry = fetch->old_entry; | |
365 | fetch->old_entry = NULL; | |
2920225f | 366 | /* preserve request -- we need its size to update counters */ |
367 | /* requestUnlink(r); */ | |
368 | /* fetch->entry->mem_obj->request = NULL; */ | |
4b4cd312 | 369 | } else if (status == HTTP_OK) { |
9b7de833 | 370 | /* get rid of old entry if any */ |
371 | if (fetch->old_entry) { | |
e13ee7ad | 372 | debug(72, 3) ("peerDigestFetchReply: got new digest, releasing old one\n"); |
9b7de833 | 373 | storeUnregister(fetch->old_entry, fetch); |
374 | storeReleaseRequest(fetch->old_entry); | |
375 | storeUnlockObject(fetch->old_entry); | |
376 | fetch->old_entry = NULL; | |
377 | } | |
378 | } else { | |
379 | /* some kind of a bug */ | |
e13ee7ad | 380 | peerDigestFetchAbort(fetch, buf, httpStatusLineReason(&reply->sline)); |
9b7de833 | 381 | return; |
382 | } | |
383 | /* must have a ready-to-use store entry if we got here */ | |
e13ee7ad | 384 | /* can we stay with the old in-memory digest? */ |
385 | if (status == HTTP_NOT_MODIFIED && fetch->pd->cd) | |
7bd7a9ee | 386 | peerDigestFetchAbort(fetch, buf, "Not modified"); |
9b7de833 | 387 | else |
4b4cd312 | 388 | storeClientCopy(fetch->entry, /* have to swap in */ |
9b7de833 | 389 | 0, 0, SM_PAGE_SIZE, buf, peerDigestSwapInHeaders, fetch); |
9b7de833 | 390 | } else { |
391 | /* need more data, do we have space? */ | |
392 | if (size >= SM_PAGE_SIZE) | |
e13ee7ad | 393 | peerDigestFetchAbort(fetch, buf, "reply header too big"); |
9b7de833 | 394 | else |
395 | storeClientCopy(fetch->entry, size, 0, SM_PAGE_SIZE, buf, | |
396 | peerDigestFetchReply, fetch); | |
397 | } | |
398 | } | |
399 | ||
400 | /* fetch headers from disk, pass on to SwapInCBlock */ | |
401 | static void | |
402 | peerDigestSwapInHeaders(void *data, char *buf, ssize_t size) | |
403 | { | |
404 | DigestFetchState *fetch = data; | |
9b7de833 | 405 | size_t hdr_size; |
e13ee7ad | 406 | |
9b7de833 | 407 | if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders")) |
408 | return; | |
e13ee7ad | 409 | |
9d486b43 | 410 | assert(!fetch->offset); |
9b7de833 | 411 | if ((hdr_size = headersEnd(buf, size))) { |
412 | assert(fetch->entry->mem_obj->reply); | |
413 | if (!fetch->entry->mem_obj->reply->sline.status) | |
414 | httpReplyParse(fetch->entry->mem_obj->reply, buf); | |
dba2bbcd | 415 | if (fetch->entry->mem_obj->reply->sline.status != HTTP_OK) { |
416 | debug(72, 1) ("peerDigestSwapInHeaders: %s status %d got cached!\n", | |
e13ee7ad | 417 | strBuf(fetch->pd->host), fetch->entry->mem_obj->reply->sline.status); |
418 | peerDigestFetchAbort(fetch, buf, "internal status error"); | |
dba2bbcd | 419 | return; |
420 | } | |
9b7de833 | 421 | fetch->offset += hdr_size; |
422 | storeClientCopy(fetch->entry, size, fetch->offset, | |
423 | SM_PAGE_SIZE, buf, | |
424 | peerDigestSwapInCBlock, fetch); | |
425 | } else { | |
426 | /* need more data, do we have space? */ | |
427 | if (size >= SM_PAGE_SIZE) | |
e13ee7ad | 428 | peerDigestFetchAbort(fetch, buf, "stored header too big"); |
9b7de833 | 429 | else |
430 | storeClientCopy(fetch->entry, size, 0, SM_PAGE_SIZE, buf, | |
431 | peerDigestSwapInHeaders, fetch); | |
432 | } | |
433 | } | |
434 | ||
435 | static void | |
436 | peerDigestSwapInCBlock(void *data, char *buf, ssize_t size) | |
437 | { | |
438 | DigestFetchState *fetch = data; | |
e13ee7ad | 439 | |
9b7de833 | 440 | if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock")) |
441 | return; | |
e13ee7ad | 442 | |
9b7de833 | 443 | if (size >= StoreDigestCBlockSize) { |
e13ee7ad | 444 | PeerDigest *pd = fetch->pd; |
9d486b43 | 445 | HttpReply *rep = fetch->entry->mem_obj->reply; |
446 | const int seen = fetch->offset + size; | |
447 | ||
e13ee7ad | 448 | assert(pd && rep); |
449 | if (peerDigestSetCBlock(pd, buf)) { | |
450 | /* XXX: soon we will have variable header size */ | |
9b7de833 | 451 | fetch->offset += StoreDigestCBlockSize; |
e13ee7ad | 452 | /* switch to CD buffer and fetch digest guts */ |
da407def | 453 | memFree(MEM_4K_BUF, buf); |
454 | buf = NULL; | |
e13ee7ad | 455 | assert(pd->cd->mask); |
da407def | 456 | storeClientCopy(fetch->entry, |
457 | seen, | |
458 | fetch->offset, | |
e13ee7ad | 459 | pd->cd->mask_size, |
460 | pd->cd->mask, | |
9b7de833 | 461 | peerDigestSwapInMask, fetch); |
462 | } else { | |
e13ee7ad | 463 | peerDigestFetchAbort(fetch, buf, "invalid digest cblock"); |
9b7de833 | 464 | } |
465 | } else { | |
466 | /* need more data, do we have space? */ | |
467 | if (size >= SM_PAGE_SIZE) | |
e13ee7ad | 468 | peerDigestFetchAbort(fetch, buf, "digest cblock too big"); |
9b7de833 | 469 | else |
470 | storeClientCopy(fetch->entry, size, 0, SM_PAGE_SIZE, buf, | |
471 | peerDigestSwapInCBlock, fetch); | |
472 | } | |
473 | } | |
474 | ||
475 | static void | |
476 | peerDigestSwapInMask(void *data, char *buf, ssize_t size) | |
477 | { | |
478 | DigestFetchState *fetch = data; | |
e13ee7ad | 479 | PeerDigest *pd; |
480 | ||
481 | /* NOTE! buf points to the middle of pd->cd->mask! */ | |
da407def | 482 | if (peerDigestFetchedEnough(fetch, NULL, size, "peerDigestSwapInMask")) |
483 | return; | |
9d486b43 | 484 | |
e13ee7ad | 485 | pd = fetch->pd; |
486 | assert(pd->cd && pd->cd->mask); | |
9d486b43 | 487 | |
da407def | 488 | fetch->offset += size; |
489 | fetch->mask_offset += size; | |
e13ee7ad | 490 | if (fetch->mask_offset >= pd->cd->mask_size) { |
68814ffd | 491 | debug(72, 2) ("peerDigestSwapInMask: Done! Got %d, expected %d\n", |
e13ee7ad | 492 | fetch->mask_offset, pd->cd->mask_size); |
493 | assert(fetch->mask_offset == pd->cd->mask_size); | |
494 | assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask")); | |
495 | } else { | |
496 | const size_t buf_sz = pd->cd->mask_size - fetch->mask_offset; | |
497 | assert(buf_sz > 0); | |
498 | storeClientCopy(fetch->entry, | |
499 | fetch->offset, | |
500 | fetch->offset, | |
501 | buf_sz, | |
502 | pd->cd->mask + fetch->mask_offset, | |
503 | peerDigestSwapInMask, fetch); | |
9b7de833 | 504 | } |
505 | } | |
506 | ||
507 | static int | |
4b4cd312 | 508 | peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name) |
9b7de833 | 509 | { |
e13ee7ad | 510 | PeerDigest *pd = NULL; |
511 | const char *host = "<unknown>"; /* peer host */ | |
512 | const char *reason = NULL; /* reason for completion */ | |
513 | const char *no_bug = NULL; /* successful completion if set */ | |
9d486b43 | 514 | const int fcb_valid = cbdataValid(fetch); |
e13ee7ad | 515 | const int pdcb_valid = fcb_valid && cbdataValid(fetch->pd); |
516 | const int pcb_valid = pdcb_valid && cbdataValid(fetch->pd->peer); | |
517 | ||
518 | /* test possible exiting conditions (the same for most steps!) | |
519 | * cases marked with '?!' should not happen */ | |
520 | ||
521 | if (!reason) { | |
522 | if (!fcb_valid) | |
523 | reason = "fetch aborted?!"; | |
524 | else if (!(pd = fetch->pd)) | |
525 | reason = "peer digest disappeared?!"; | |
526 | else if (!cbdataValid(pd)) | |
527 | reason = "invalidated peer digest?!"; | |
528 | else | |
529 | host = strBuf(pd->host); | |
530 | } | |
531 | ||
532 | debug(72, 6) ("%s: peer %s, offset: %d size: %d.\n", | |
533 | step_name, host, fcb_valid ? fetch->offset : -1, size); | |
534 | ||
535 | /* continue checking (with pd and host known and valid) */ | |
536 | if (!reason) { | |
537 | if (!cbdataValid(pd->peer)) | |
538 | reason = "peer disappeared"; | |
539 | else if (size < 0) | |
540 | reason = "swap failure"; | |
541 | else if (!fetch->entry) | |
542 | reason = "swap aborted?!"; | |
543 | else if (fetch->entry->store_status == STORE_ABORTED) | |
544 | reason = "swap aborted"; | |
545 | } | |
546 | ||
547 | /* continue checking (maybe-successful eof case) */ | |
548 | if (!reason && !size) { | |
549 | if (!pd->cd) | |
550 | reason = "null digest?!"; | |
551 | else if (fetch->mask_offset != pd->cd->mask_size) | |
552 | reason = "premature end of digest?!"; | |
553 | else if (!peerDigestUseful(pd)) | |
554 | reason = "useless digest"; | |
555 | else | |
556 | reason = no_bug = "success"; | |
557 | } | |
558 | ||
559 | /* finish if we have a reason */ | |
9b7de833 | 560 | if (reason) { |
e13ee7ad | 561 | const int level = strstr(reason, "?!") ? 1 : 3; |
562 | debug(72, level) ("%s: peer %s, exiting after '%s'\n", | |
563 | step_name, host, reason); | |
564 | peerDigestReqFinish(fetch, buf, | |
565 | fcb_valid, pdcb_valid, pcb_valid, reason, !no_bug); | |
566 | } else { | |
567 | /* paranoid check */ | |
568 | assert(fcb_valid && pdcb_valid && pcb_valid); | |
569 | } | |
570 | return reason != NULL; | |
571 | } | |
572 | ||
573 | /* call this when all callback data is valid but something bad happened */ | |
574 | static void | |
575 | peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason) | |
576 | { | |
577 | peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1); | |
578 | } | |
579 | ||
580 | /* complete the digest transfer, update stats, unlock/release everything */ | |
581 | static void | |
582 | peerDigestReqFinish(DigestFetchState * fetch, char *buf, | |
583 | int fcb_valid, int pdcb_valid, int pcb_valid, | |
584 | const char *reason, int err) | |
585 | { | |
586 | assert(reason); | |
587 | ||
588 | /* must go before peerDigestPDFinish */ | |
589 | if (pdcb_valid) { | |
590 | fetch->pd->flags.requested = 0; | |
591 | fetch->pd->req_result = reason; | |
592 | } | |
593 | ||
594 | /* schedule next check if peer is still out there */ | |
595 | if (pcb_valid) { | |
596 | PeerDigest *pd = fetch->pd; | |
597 | if (err) { | |
598 | pd->times.retry_delay = peerDigestIncDelay(pd); | |
599 | peerDigestSetCheck(pd, pd->times.retry_delay); | |
9d486b43 | 600 | } else { |
e13ee7ad | 601 | pd->times.retry_delay = 0; |
602 | peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry)); | |
9d486b43 | 603 | } |
9b7de833 | 604 | } |
e13ee7ad | 605 | |
606 | /* note: order is significant */ | |
607 | if (fcb_valid) | |
608 | peerDigestFetchSetStats(fetch); | |
609 | if (pdcb_valid) | |
610 | peerDigestPDFinish(fetch, pcb_valid, err); | |
611 | if (fcb_valid) | |
612 | peerDigestFetchFinish(fetch, err); | |
613 | if (buf) | |
614 | memFree(MEM_4K_BUF, buf); | |
9b7de833 | 615 | } |
616 | ||
e13ee7ad | 617 | |
618 | /* destroys digest if peer disappeared | |
619 | * must be called only when fetch and pd cbdata are valid */ | |
9b7de833 | 620 | static void |
e13ee7ad | 621 | peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err) |
9b7de833 | 622 | { |
e13ee7ad | 623 | PeerDigest *pd = fetch->pd; |
624 | const char *host = strBuf(pd->host); | |
625 | ||
626 | pd->times.received = squid_curtime; | |
627 | pd->times.req_delay = fetch->resp_time; | |
628 | kb_incr(&pd->stats.sent.kbytes, (size_t)fetch->sent.bytes); | |
629 | kb_incr(&pd->stats.recv.kbytes, (size_t)fetch->recv.bytes); | |
630 | pd->stats.sent.msgs += fetch->sent.msg; | |
631 | pd->stats.recv.msgs += fetch->recv.msg; | |
632 | ||
633 | if (err) { | |
634 | debug(72, 1) ("%sdisabling (%s) digest from %s\n", | |
635 | pcb_valid ? "temporary " : "", | |
636 | pd->req_result, host); | |
637 | ||
638 | if (pd->cd) { | |
639 | cacheDigestDestroy(pd->cd); | |
640 | pd->cd = NULL; | |
641 | } | |
642 | ||
643 | pd->flags.usable = 0; | |
644 | ||
645 | if (!pcb_valid) | |
646 | peerDigestNotePeerGone(pd); | |
647 | } else { | |
648 | assert(pcb_valid); | |
649 | ||
650 | pd->flags.usable = 1; | |
651 | ||
652 | /* XXX: ugly condition, but how? */ | |
653 | if (fetch->entry->store_status == STORE_OK) | |
654 | debug(72, 2) ("re-used old digest from %s\n", host); | |
655 | else | |
656 | debug(72, 2) ("received valid digest from %s\n", host); | |
d1cdaa16 | 657 | } |
e13ee7ad | 658 | } |
659 | ||
660 | /* free fetch state structures | |
661 | * must be called only when fetch cbdata is valid */ | |
662 | static void | |
663 | peerDigestFetchFinish(DigestFetchState * fetch, int err) | |
664 | { | |
665 | assert(fetch->entry && fetch->request); | |
666 | ||
9b7de833 | 667 | if (fetch->old_entry) { |
4b4cd312 | 668 | debug(72, 2) ("peerDigestFetchFinish: deleting old entry\n"); |
9b7de833 | 669 | storeUnregister(fetch->old_entry, fetch); |
670 | storeReleaseRequest(fetch->old_entry); | |
671 | storeUnlockObject(fetch->old_entry); | |
672 | fetch->old_entry = NULL; | |
673 | } | |
e13ee7ad | 674 | |
1543ab6c | 675 | /* update global stats */ |
e13ee7ad | 676 | kb_incr(&Counter.cd.kbytes_sent, (size_t) fetch->sent.bytes); |
677 | kb_incr(&Counter.cd.kbytes_recv, (size_t) fetch->recv.bytes); | |
678 | Counter.cd.msgs_sent += fetch->sent.msg; | |
679 | Counter.cd.msgs_recv += fetch->recv.msg; | |
680 | ||
1543ab6c | 681 | /* unlock everything */ |
e13ee7ad | 682 | if (fetch->pd) |
683 | cbdataUnlock(fetch->pd); | |
9b7de833 | 684 | storeUnregister(fetch->entry, fetch); |
685 | storeUnlockObject(fetch->entry); | |
903c39e4 | 686 | requestUnlink(fetch->request); |
9b7de833 | 687 | fetch->entry = NULL; |
903c39e4 | 688 | fetch->request = NULL; |
9d486b43 | 689 | cbdataUnlock(fetch); |
9b7de833 | 690 | cbdataFree(fetch); |
9b7de833 | 691 | } |
692 | ||
e13ee7ad | 693 | /* calculate fetch stats after completion */ |
694 | static void | |
695 | peerDigestFetchSetStats(DigestFetchState * fetch) | |
696 | { | |
697 | MemObject *mem; | |
698 | assert(fetch->entry && fetch->request); | |
699 | ||
700 | mem = fetch->entry->mem_obj; | |
701 | assert(mem); | |
702 | ||
703 | /* XXX: outgoing numbers are not precise */ | |
704 | /* XXX: we must distinguish between 304 hits and misses here */ | |
705 | fetch->sent.bytes = httpRequestPrefixLen(fetch->request); | |
706 | fetch->recv.bytes = fetch->entry->store_status == STORE_PENDING ? | |
707 | mem->inmem_hi : mem->object_sz; | |
708 | fetch->sent.msg = fetch->recv.msg = 1; | |
709 | fetch->expires = fetch->entry->expires; | |
710 | fetch->resp_time = squid_curtime - fetch->start_time; | |
711 | ||
712 | debug(72, 3) ("peerDigestFetchFinish: recv %d bytes in %d secs\n", | |
713 | fetch->recv.bytes, fetch->resp_time); | |
714 | debug(72, 3) ("peerDigestFetchFinish: expires: %d (%+d), lmt: %d (%+d)\n", | |
715 | fetch->expires, fetch->expires-squid_curtime, | |
716 | fetch->entry->lastmod, fetch->entry->lastmod-squid_curtime); | |
717 | } | |
718 | ||
719 | ||
9b7de833 | 720 | static int |
e13ee7ad | 721 | peerDigestSetCBlock(PeerDigest *pd, const char *buf) |
9b7de833 | 722 | { |
723 | StoreDigestCBlock cblock; | |
724 | int freed_size = 0; | |
e13ee7ad | 725 | const char *host = strBuf(pd->host); |
726 | ||
9b7de833 | 727 | xmemcpy(&cblock, buf, sizeof(cblock)); |
728 | /* network -> host conversions */ | |
729 | cblock.ver.current = ntohs(cblock.ver.current); | |
730 | cblock.ver.required = ntohs(cblock.ver.required); | |
731 | cblock.capacity = ntohl(cblock.capacity); | |
732 | cblock.count = ntohl(cblock.count); | |
733 | cblock.del_count = ntohl(cblock.del_count); | |
734 | cblock.mask_size = ntohl(cblock.mask_size); | |
4b4cd312 | 735 | debug(72, 2) ("got digest cblock from %s; ver: %d (req: %d)\n", |
e13ee7ad | 736 | host, (int) cblock.ver.current, (int) cblock.ver.required); |
4b4cd312 | 737 | debug(72, 2) ("\t size: %d bytes, e-cnt: %d, e-util: %d%%\n", |
9b7de833 | 738 | cblock.mask_size, cblock.count, |
739 | xpercentInt(cblock.count, cblock.capacity)); | |
6106c6fc | 740 | /* check version requirements (both ways) */ |
9b7de833 | 741 | if (cblock.ver.required > CacheDigestVer.current) { |
4b4cd312 | 742 | debug(72, 1) ("%s digest requires version %d; have: %d\n", |
e13ee7ad | 743 | host, cblock.ver.required, CacheDigestVer.current); |
9b7de833 | 744 | return 0; |
745 | } | |
6106c6fc | 746 | if (cblock.ver.current < CacheDigestVer.required) { |
4b4cd312 | 747 | debug(72, 1) ("%s digest is version %d; we require: %d\n", |
e13ee7ad | 748 | host, cblock.ver.current, CacheDigestVer.required); |
6106c6fc | 749 | return 0; |
750 | } | |
9b7de833 | 751 | /* check consistency */ |
4b4cd312 | 752 | if (cblock.ver.required > cblock.ver.current || |
d1cdaa16 | 753 | cblock.mask_size <= 0 || cblock.capacity <= 0 || |
754 | cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) { | |
e13ee7ad | 755 | debug(72, 0) ("%s digest cblock is corrupted.\n", host); |
9b7de833 | 756 | return 0; |
757 | } | |
d1cdaa16 | 758 | /* check consistency further */ |
759 | if (cblock.mask_size != cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)) { | |
4b4cd312 | 760 | debug(72, 0) ("%s digest cblock is corrupted (mask size mismatch: %d ? %d).\n", |
e13ee7ad | 761 | host, cblock.mask_size, cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)); |
d1cdaa16 | 762 | return 0; |
763 | } | |
764 | /* there are some things we cannot do yet */ | |
765 | if (cblock.hash_func_count != CacheDigestHashFuncCount) { | |
4b4cd312 | 766 | debug(72, 0) ("%s digest: unsupported #hash functions: %d ? %d.\n", |
e13ee7ad | 767 | host, cblock.hash_func_count, CacheDigestHashFuncCount); |
d1cdaa16 | 768 | return 0; |
769 | } | |
9b7de833 | 770 | /* |
771 | * no cblock bugs below this point | |
772 | */ | |
773 | /* check size changes */ | |
e13ee7ad | 774 | if (pd->cd && cblock.mask_size != pd->cd->mask_size) { |
4b4cd312 | 775 | debug(72, 2) ("%s digest changed size: %d -> %d\n", |
e13ee7ad | 776 | host, cblock.mask_size, pd->cd->mask_size); |
777 | freed_size = pd->cd->mask_size; | |
778 | cacheDigestDestroy(pd->cd); | |
779 | pd->cd = NULL; | |
9b7de833 | 780 | } |
e13ee7ad | 781 | if (!pd->cd) { |
45694534 | 782 | debug(72, 2) ("creating %s digest; size: %d (%+d) bytes\n", |
e13ee7ad | 783 | host, cblock.mask_size, (int) (cblock.mask_size - freed_size)); |
784 | pd->cd = cacheDigestCreate(cblock.capacity, cblock.bits_per_entry); | |
9b7de833 | 785 | if (cblock.mask_size >= freed_size) |
786 | kb_incr(&Counter.cd.memory, cblock.mask_size - freed_size); | |
787 | } | |
e13ee7ad | 788 | assert(pd->cd); |
9b7de833 | 789 | /* these assignments leave us in an inconsistent state until we finish reading the digest */ |
e13ee7ad | 790 | pd->cd->count = cblock.count; |
791 | pd->cd->del_count = cblock.del_count; | |
9b7de833 | 792 | return 1; |
793 | } | |
794 | ||
9b7de833 | 795 | static int |
e13ee7ad | 796 | peerDigestUseful(const PeerDigest *pd) |
9b7de833 | 797 | { |
d1cdaa16 | 798 | /* TODO: we should calculate the prob of a false hit instead of bit util */ |
e13ee7ad | 799 | const int bit_util = cacheDigestBitUtil(pd->cd); |
800 | if (bit_util > 65) { | |
4b4cd312 | 801 | debug(72, 0) ("Warning: %s peer digest has too many bits on (%d%%).\n", |
e13ee7ad | 802 | strBuf(pd->host), bit_util); |
d1cdaa16 | 803 | return 0; |
9b7de833 | 804 | } |
805 | return 1; | |
806 | } | |
7f6eb0fe | 807 | |
e13ee7ad | 808 | static int |
809 | saneDiff(time_t diff) | |
810 | { | |
811 | return abs(diff) > squid_curtime/2 ? 0 : diff; | |
812 | } | |
813 | ||
814 | void | |
815 | peerDigestStatsReport(const PeerDigest *pd, StoreEntry * e) | |
816 | { | |
817 | #define f2s(flag) (pd->flags.flag ? "yes" : "no") | |
818 | #define appendTime(tm) storeAppendPrintf(e, "%s\t %10d\t %+d\t %+d\n", \ | |
819 | ""#tm, pd->times.tm, \ | |
820 | saneDiff(pd->times.tm - squid_curtime), \ | |
821 | saneDiff(pd->times.tm - pd->times.initialized)) | |
822 | ||
823 | const char *host = pd ? strBuf(pd->host) : NULL; | |
824 | assert(pd); | |
825 | ||
826 | storeAppendPrintf(e, "\npeer digest from %s\n", host); | |
827 | ||
828 | cacheDigestGuessStatsReport(&pd->stats.guess, e, host); | |
829 | ||
830 | storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n"); | |
831 | appendTime(initialized); | |
832 | appendTime(needed); | |
833 | appendTime(requested); | |
834 | appendTime(received); | |
835 | appendTime(next_check); | |
836 | ||
837 | storeAppendPrintf(e, "peer digest state:\n"); | |
838 | storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n", | |
839 | f2s(needed), f2s(usable), f2s(requested)); | |
840 | storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n", | |
841 | pd->times.retry_delay); | |
842 | storeAppendPrintf(e, "\tlast request response time: %d secs\n", | |
843 | pd->times.req_delay); | |
844 | storeAppendPrintf(e, "\tlast request result: %s\n", | |
845 | pd->req_result ? pd->req_result : "(none)"); | |
846 | ||
847 | storeAppendPrintf(e, "\npeer digest traffic:\n"); | |
848 | storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n", | |
849 | pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb); | |
850 | storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n", | |
851 | pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb); | |
852 | ||
853 | storeAppendPrintf(e, "\npeer digest structure:\n"); | |
854 | if (pd->cd) | |
855 | cacheDigestReport(pd->cd, host, e); | |
856 | else | |
857 | storeAppendPrintf(e, "\tno in-memory copy\n"); | |
858 | } | |
859 | ||
7f6eb0fe | 860 | #endif |