]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (C) 1996-2015 The Squid Software Foundation and contributors | |
3 | * | |
4 | * Squid software is distributed under GPLv2+ license and includes | |
5 | * contributions from numerous individuals and organizations. | |
6 | * Please see the COPYING and CONTRIBUTORS files for details. | |
7 | */ | |
8 | ||
9 | /* DEBUG: section 72 Peer Digest Routines */ | |
10 | ||
11 | #include "squid.h" | |
12 | #if USE_CACHE_DIGESTS | |
13 | #include "CacheDigest.h" | |
14 | #include "CachePeer.h" | |
15 | #include "event.h" | |
16 | #include "FwdState.h" | |
17 | #include "globals.h" | |
18 | #include "HttpReply.h" | |
19 | #include "HttpRequest.h" | |
20 | #include "internal.h" | |
21 | #include "MemObject.h" | |
22 | #include "mime_header.h" | |
23 | #include "neighbors.h" | |
24 | #include "PeerDigest.h" | |
25 | #include "SquidTime.h" | |
26 | #include "Store.h" | |
27 | #include "store_key_md5.h" | |
28 | #include "StoreClient.h" | |
29 | #include "tools.h" | |
30 | #include "util.h" | |
31 | ||
32 | /* local types */ | |
33 | ||
34 | /* local prototypes */ | |
35 | static time_t peerDigestIncDelay(const PeerDigest * pd); | |
36 | static time_t peerDigestNewDelay(const StoreEntry * e); | |
37 | static void peerDigestSetCheck(PeerDigest * pd, time_t delay); | |
38 | static void peerDigestClean(PeerDigest *); | |
39 | static EVH peerDigestCheck; | |
40 | static void peerDigestRequest(PeerDigest * pd); | |
41 | static STCB peerDigestHandleReply; | |
42 | static int peerDigestFetchReply(void *, char *, ssize_t); | |
43 | int peerDigestSwapInHeaders(void *, char *, ssize_t); | |
44 | int peerDigestSwapInCBlock(void *, char *, ssize_t); | |
45 | int peerDigestSwapInMask(void *, char *, ssize_t); | |
46 | static int peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name); | |
47 | static void peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason); | |
48 | static void peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason); | |
49 | static void peerDigestReqFinish(DigestFetchState * fetch, char *buf, int, int, int, const char *reason, int err); | |
50 | static void peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err); | |
51 | static void peerDigestFetchFinish(DigestFetchState * fetch, int err); | |
52 | static void peerDigestFetchSetStats(DigestFetchState * fetch); | |
53 | static int peerDigestSetCBlock(PeerDigest * pd, const char *buf); | |
54 | static int peerDigestUseful(const PeerDigest * pd); | |
55 | ||
56 | /* local constants */ | |
57 | Version const CacheDigestVer = { 5, 3 }; | |
58 | ||
59 | #define StoreDigestCBlockSize sizeof(StoreDigestCBlock) | |
60 | ||
61 | /* min interval for requesting digests from a given peer */ | |
62 | static const time_t PeerDigestReqMinGap = 5 * 60; /* seconds */ | |
63 | /* min interval for requesting digests (cumulative request stream) */ | |
64 | static const time_t GlobDigestReqMinGap = 1 * 60; /* seconds */ | |
65 | ||
66 | /* local vars */ | |
67 | ||
68 | static time_t pd_last_req_time = 0; /* last call to Check */ | |
69 | ||
/* initialize peer digest
 *
 * Zeroes *pd, takes a cbdata reference on the peer (released in
 * peerDigestDestroy()), copies the peer's host name, and stamps the
 * initialization time. Does not allocate the digest itself.
 */
static void
peerDigestInit(PeerDigest * pd, CachePeer * p)
{
    assert(pd && p);

    /* zero everything first; individual fields are then set below */
    memset(pd, 0, sizeof(*pd));
    /*
     * DPW 2007-04-12
     * Lock on to the peer here. The corresponding cbdataReferenceDone()
     * is in peerDigestDestroy().
     */
    pd->peer = cbdataReference(p);
    /* if the peer disappears, we will still know its name */
    pd->host = p->host;

    pd->times.initialized = squid_curtime;
}
88 | ||
/* release the digest contents (cache digest and host string);
 * does NOT free pd itself nor drop the peer reference --
 * see peerDigestDestroy() for the full teardown */
static void
peerDigestClean(PeerDigest * pd)
{
    assert(pd);

    if (pd->cd)
        cacheDigestDestroy(pd->cd);

    pd->host.clean();
}
99 | ||
CBDATA_CLASS_INIT(PeerDigest);

/* allocate new peer digest, call Init, and lock everything
 *
 * Returns an extra cbdata reference to the new digest; the caller
 * is expected to own it (see the XXX below -- the referencing
 * scheme here is suspicious and duplicated in the caller).
 */
PeerDigest *
peerDigestCreate(CachePeer * p)
{
    PeerDigest *pd;
    assert(p);

    pd = new PeerDigest;
    peerDigestInit(pd, p);

    /* XXX This does not look right, and the same thing again in the caller */
    return cbdataReference(pd);
}
115 | ||
/* call Clean and free/unlock everything
 *
 * Counterpart of peerDigestCreate()/peerDigestInit(): drops the peer
 * reference, notifies the peer (if still alive) that its digest is
 * gone, frees the digest contents, and deletes pd.
 */
static void
peerDigestDestroy(PeerDigest * pd)
{
    void *p;
    assert(pd);
    void * peerTmp = pd->peer;

    /*
     * DPW 2007-04-12
     * We locked the peer in peerDigestInit(), this is
     * where we unlock it. If the peer is still valid,
     * tell it that the digest is gone.
     */
    if (cbdataReferenceValidDone(peerTmp, &p))
        peerNoteDigestGone((CachePeer *)p);

    peerDigestClean(pd);

    delete pd;
}
137 | ||
/* called by peer to indicate that somebody actually needs this digest
 *
 * Marks the digest as needed and schedules an immediate
 * peerDigestCheck() via peerDigestSetCheck(pd, 0).
 */
void
peerDigestNeeded(PeerDigest * pd)
{
    assert(pd);
    assert(!pd->flags.needed);  /* must not be flagged as needed twice */
    assert(!pd->cd);            /* no digest should exist yet */

    pd->flags.needed = true;
    pd->times.needed = squid_curtime;
    peerDigestSetCheck(pd, 0);  /* check asap */
}
150 | ||
/* currently we do not have a reason to disable without destroying */
#if FUTURE_CODE
/* disables peer for good: clears the usable flag, cancels future
 * checks (next_check = -1) and frees the in-core digest, but keeps
 * the PeerDigest object for its history/stats */
static void
peerDigestDisable(PeerDigest * pd)
{
    debugs(72, 2, "peerDigestDisable: peer " << pd->host.buf() << " disabled for good");
    pd->times.disabled = squid_curtime;
    pd->times.next_check = -1;  /* never */
    pd->flags.usable = 0;

    if (pd->cd) {
        cacheDigestDestroy(pd->cd);
        pd->cd = NULL;
    }

    /* we do not destroy the pd itself to preserve its "history" and stats */
}

#endif
171 | ||
172 | /* increment retry delay [after an unsuccessful attempt] */ | |
173 | static time_t | |
174 | peerDigestIncDelay(const PeerDigest * pd) | |
175 | { | |
176 | assert(pd); | |
177 | return pd->times.retry_delay > 0 ? | |
178 | 2 * pd->times.retry_delay : /* exponential backoff */ | |
179 | PeerDigestReqMinGap; /* minimal delay */ | |
180 | } | |
181 | ||
182 | /* artificially increases Expires: setting to avoid race conditions | |
183 | * returns the delay till that [increased] expiration time */ | |
184 | static time_t | |
185 | peerDigestNewDelay(const StoreEntry * e) | |
186 | { | |
187 | assert(e); | |
188 | ||
189 | if (e->expires > 0) | |
190 | return e->expires + PeerDigestReqMinGap - squid_curtime; | |
191 | ||
192 | return PeerDigestReqMinGap; | |
193 | } | |
194 | ||
/* registers next digest verification
 *
 * Schedules a single peerDigestCheck() callback to fire after 'delay'
 * seconds and records the planned check time in pd->times.next_check.
 */
static void
peerDigestSetCheck(PeerDigest * pd, time_t delay)
{
    eventAdd("peerDigestCheck", peerDigestCheck, pd, (double) delay, 1);
    pd->times.next_check = squid_curtime + delay;
    debugs(72, 3, "peerDigestSetCheck: will check peer " << pd->host << " in " << delay << " secs");
}
203 | ||
204 | /* | |
205 | * called when peer is about to disappear or have already disappeared | |
206 | */ | |
207 | void | |
208 | peerDigestNotePeerGone(PeerDigest * pd) | |
209 | { | |
210 | if (pd->flags.requested) { | |
211 | debugs(72, 2, "peerDigest: peer " << pd->host << " gone, will destroy after fetch."); | |
212 | /* do nothing now, the fetching chain will notice and take action */ | |
213 | } else { | |
214 | debugs(72, 2, "peerDigest: peer " << pd->host << " is gone, destroying now."); | |
215 | peerDigestDestroy(pd); | |
216 | } | |
217 | } | |
218 | ||
/* callback for eventAdd() (with peer digest locked)
 * request new digest if our copy is too old or if we lack one;
 * schedule next check otherwise
 *
 * Rate limits requests twice: per peer (PeerDigestReqMinGap since the
 * last digest was received) and globally (GlobDigestReqMinGap since
 * the last request to any peer).
 */
static void
peerDigestCheck(void *data)
{
    PeerDigest *pd = (PeerDigest *)data;
    time_t req_time;

    assert(!pd->flags.requested);

    pd->times.next_check = 0;   /* unknown */

    /* if the peer is gone, the digest goes with it */
    if (!cbdataReferenceValid(pd->peer)) {
        peerDigestNotePeerGone(pd);
        return;
    }

    debugs(72, 3, "peerDigestCheck: peer " << pd->peer->host << ":" << pd->peer->http_port);
    debugs(72, 3, "peerDigestCheck: time: " << squid_curtime <<
           ", last received: " << (long int) pd->times.received << " (" <<
           std::showpos << (int) (squid_curtime - pd->times.received) << ")");

    /* decide when we should send the request:
     * request now unless too close to other requests */
    req_time = squid_curtime;

    /* per-peer limit */

    if (req_time - pd->times.received < PeerDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close peer requests (" <<
               (int) (req_time - pd->times.received) << " < " <<
               (int) PeerDigestReqMinGap << " secs).");

        req_time = pd->times.received + PeerDigestReqMinGap;
    }

    /* global limit */
    if (req_time - pd_last_req_time < GlobDigestReqMinGap) {
        debugs(72, 2, "peerDigestCheck: " << pd->host <<
               ", avoiding close requests (" <<
               (int) (req_time - pd_last_req_time) << " < " <<
               (int) GlobDigestReqMinGap << " secs).");

        req_time = pd_last_req_time + GlobDigestReqMinGap;
    }

    /* either fetch now or re-schedule ourselves for the computed time */
    if (req_time <= squid_curtime)
        peerDigestRequest(pd);  /* will set pd->flags.requested */
    else
        peerDigestSetCheck(pd, req_time - squid_curtime);
}
272 | ||
CBDATA_TYPE(DigestFetchState);

/* ask store for a digest
 *
 * Builds the digest URL (peer-configured digest_url or the standard
 * /squid-internal-periodic/ location), creates the HTTP request and a
 * DigestFetchState, wires up store clients for both the new entry and
 * any cached old entry (for IMS revalidation), starts forwarding, and
 * schedules the first storeClientCopy() into fetch->buf with
 * peerDigestHandleReply as the callback.
 */
static void
peerDigestRequest(PeerDigest * pd)
{
    CachePeer *p = pd->peer;
    StoreEntry *e, *old_e;
    char *url = NULL;
    const cache_key *key;
    HttpRequest *req;
    DigestFetchState *fetch = NULL;
    StoreIOBuffer tempBuffer;

    pd->req_result = NULL;
    pd->flags.requested = true;

    /* compute future request components */

    if (p->digest_url)
        url = xstrdup(p->digest_url);
    else
        url = xstrdup(internalRemoteUri(p->host, p->http_port, "/squid-internal-periodic/", StoreDigestFileName));

    req = HttpRequest::CreateFromUrl(url);

    assert(req);

    key = storeKeyPublicByRequest(req);

    debugs(72, 2, "peerDigestRequest: " << url << " key: " << storeKeyText(key));

    /* add custom headers */
    assert(!req->header.len);

    req->header.putStr(HDR_ACCEPT, StoreDigestMimeStr);

    req->header.putStr(HDR_ACCEPT, "text/html");

    /* forward peer login credentials, except for the special
     * pass-through / negotiation keywords */
    if (p->login &&
            p->login[0] != '*' &&
            strcmp(p->login, "PASS") != 0 &&
            strcmp(p->login, "PASSTHRU") != 0 &&
            strcmp(p->login, "NEGOTIATE") != 0 &&
            strcmp(p->login, "PROXYPASS") != 0) {
        req->url.userInfo(SBuf(p->login)); // XXX: performance regression make peer login SBuf as well.
    }
    /* create fetch state structure */
    CBDATA_INIT_TYPE(DigestFetchState);

    fetch = cbdataAlloc(DigestFetchState);

    fetch->request = req;
    HTTPMSGLOCK(fetch->request);

    /* reference released in peerDigestPDFinish() */
    fetch->pd = cbdataReference(pd);

    fetch->offset = 0;

    fetch->state = DIGEST_READ_REPLY;

    /* update timestamps */
    fetch->start_time = squid_curtime;

    pd->times.requested = squid_curtime;

    pd_last_req_time = squid_curtime;

    req->flags.cachable = true;

    /* the rest is based on clientProcessExpired() */
    req->flags.refresh = true;

    old_e = fetch->old_entry = Store::Root().get(key);

    if (old_e) {
        debugs(72, 5, "peerDigestRequest: found old entry");

        /* unlocked in peerDigestFetchReply()/peerDigestFetchFinish() */
        old_e->lock("peerDigestRequest");
        old_e->createMemObject(url, url, req->method);

        fetch->old_sc = storeClientListAdd(old_e, fetch);
    }

    e = fetch->entry = storeCreateEntry(url, url, req->flags, req->method);
    assert(EBIT_TEST(e->flags, KEY_PRIVATE));
    fetch->sc = storeClientListAdd(e, fetch);
    /* set lastmod to trigger IMS request if possible */

    if (old_e)
        e->lastmod = old_e->lastmod;

    /* push towards peer cache */
    debugs(72, 3, "peerDigestRequest: forwarding to fwdStart...");

    FwdState::fwdStart(Comm::ConnectionPointer(), e, req);

    tempBuffer.offset = 0;

    tempBuffer.length = SM_PAGE_SIZE;

    tempBuffer.data = fetch->buf;

    storeClientCopy(fetch->sc, e, tempBuffer,
                    peerDigestHandleReply, fetch);

    safe_free(url);
}
381 | ||
382 | /* Handle the data copying .. */ | |
383 | ||
384 | /* | |
385 | * This routine handles the copy data and then redirects the | |
386 | * copy to a bunch of subfunctions depending upon the copy state. | |
387 | * It also tracks the buffer offset and "seen", since I'm actually | |
388 | * not interested in rewriting everything to suit my little idea. | |
389 | */ | |
390 | static void | |
391 | peerDigestHandleReply(void *data, StoreIOBuffer receivedData) | |
392 | { | |
393 | DigestFetchState *fetch = (DigestFetchState *)data; | |
394 | int retsize = -1; | |
395 | digest_read_state_t prevstate; | |
396 | int newsize; | |
397 | ||
398 | assert(fetch->pd && receivedData.data); | |
399 | /* The existing code assumes that the received pointer is | |
400 | * where we asked the data to be put | |
401 | */ | |
402 | assert(fetch->buf + fetch->bufofs == receivedData.data); | |
403 | ||
404 | /* Update the buffer size */ | |
405 | fetch->bufofs += receivedData.length; | |
406 | ||
407 | assert(fetch->bufofs <= SM_PAGE_SIZE); | |
408 | ||
409 | /* If we've fetched enough, return */ | |
410 | ||
411 | if (peerDigestFetchedEnough(fetch, fetch->buf, fetch->bufofs, "peerDigestHandleReply")) | |
412 | return; | |
413 | ||
414 | /* Call the right function based on the state */ | |
415 | /* (Those functions will update the state if needed) */ | |
416 | ||
417 | /* Give us a temporary reference. Some of the calls we make may | |
418 | * try to destroy the fetch structure, and we like to know if they | |
419 | * do | |
420 | */ | |
421 | fetch = cbdataReference(fetch); | |
422 | ||
423 | /* Repeat this loop until we're out of data OR the state changes */ | |
424 | /* (So keep going if the state has changed and we still have data */ | |
425 | do { | |
426 | prevstate = fetch->state; | |
427 | ||
428 | switch (fetch->state) { | |
429 | ||
430 | case DIGEST_READ_REPLY: | |
431 | retsize = peerDigestFetchReply(fetch, fetch->buf, fetch->bufofs); | |
432 | break; | |
433 | ||
434 | case DIGEST_READ_HEADERS: | |
435 | retsize = peerDigestSwapInHeaders(fetch, fetch->buf, fetch->bufofs); | |
436 | break; | |
437 | ||
438 | case DIGEST_READ_CBLOCK: | |
439 | retsize = peerDigestSwapInCBlock(fetch, fetch->buf, fetch->bufofs); | |
440 | break; | |
441 | ||
442 | case DIGEST_READ_MASK: | |
443 | retsize = peerDigestSwapInMask(fetch, fetch->buf, fetch->bufofs); | |
444 | break; | |
445 | ||
446 | case DIGEST_READ_NONE: | |
447 | break; | |
448 | ||
449 | case DIGEST_READ_DONE: | |
450 | goto finish; | |
451 | break; | |
452 | ||
453 | default: | |
454 | fatal("Bad digest transfer mode!\n"); | |
455 | } | |
456 | ||
457 | if (retsize < 0) | |
458 | goto finish; | |
459 | ||
460 | /* | |
461 | * The returned size indicates how much of the buffer was read - | |
462 | * so move the remainder of the buffer to the beginning | |
463 | * and update the bufofs / bufsize | |
464 | */ | |
465 | newsize = fetch->bufofs - retsize; | |
466 | ||
467 | memmove(fetch->buf, fetch->buf + retsize, fetch->bufofs - newsize); | |
468 | ||
469 | fetch->bufofs = newsize; | |
470 | ||
471 | } while (cbdataReferenceValid(fetch) && prevstate != fetch->state && fetch->bufofs > 0); | |
472 | ||
473 | /* Update the copy offset */ | |
474 | fetch->offset += receivedData.length; | |
475 | ||
476 | /* Schedule another copy */ | |
477 | if (cbdataReferenceValid(fetch)) { | |
478 | StoreIOBuffer tempBuffer; | |
479 | tempBuffer.offset = fetch->offset; | |
480 | tempBuffer.length = SM_PAGE_SIZE - fetch->bufofs; | |
481 | tempBuffer.data = fetch->buf + fetch->bufofs; | |
482 | storeClientCopy(fetch->sc, fetch->entry, tempBuffer, | |
483 | peerDigestHandleReply, fetch); | |
484 | } | |
485 | ||
486 | finish: | |
487 | /* Get rid of our reference, we've finished with it for now */ | |
488 | cbdataReferenceDone(fetch); | |
489 | } | |
490 | ||
/* wait for full http headers to be received then parse them */
/*
 * This routine handles parsing the reply line.
 * If the reply line indicates an OK, the same data is thrown
 * to SwapInHeaders(). If the reply line is a NOT_MODIFIED,
 * we simply stop parsing.
 *
 * Returns 0 (nothing consumed -- SwapInHeaders() must see the same
 * bytes again) or -1 on abort.
 */
static int
peerDigestFetchReply(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    PeerDigest *pd = fetch->pd;
    size_t hdr_size;
    assert(pd && buf);
    assert(!fetch->offset);

    assert(fetch->state == DIGEST_READ_REPLY);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestFetchReply"))
        return -1;

    if ((hdr_size = headersEnd(buf, size))) {
        HttpReply const *reply = fetch->entry->getReply();
        assert(reply);
        assert(reply->sline.status() != Http::scNone);
        const Http::StatusCode status = reply->sline.status();
        debugs(72, 3, "peerDigestFetchReply: " << pd->host << " status: " << status <<
               ", expires: " << (long int) reply->expires << " (" << std::showpos <<
               (int) (reply->expires - squid_curtime) << ")");

        /* this "if" is based on clientHandleIMSReply() */

        if (status == Http::scNotModified) {
            /* our old entry is fine */
            assert(fetch->old_entry);

            /* the old entry may lack a request; borrow (and lock) the new one's */
            if (!fetch->old_entry->mem_obj->request) {
                fetch->old_entry->mem_obj->request = fetch->entry->mem_obj->request;
                HTTPMSGLOCK(fetch->old_entry->mem_obj->request);
            }

            assert(fetch->old_entry->mem_obj->request);

            HttpReply *old_rep = (HttpReply *) fetch->old_entry->getReply();

            old_rep->updateOnNotModified(reply);

            fetch->old_entry->timestampsSet();

            /* get rid of 304 reply */
            storeUnregister(fetch->sc, fetch->entry, fetch);

            fetch->entry->unlock("peerDigestFetchReply 304");

            /* continue the fetch from the (revalidated) old entry */
            fetch->entry = fetch->old_entry;

            fetch->old_entry = NULL;

            /* preserve request -- we need its size to update counters */
            /* requestUnlink(r); */
            /* fetch->entry->mem_obj->request = NULL; */
        } else if (status == Http::scOkay) {
            /* get rid of old entry if any */

            if (fetch->old_entry) {
                debugs(72, 3, "peerDigestFetchReply: got new digest, releasing old one");
                storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
                fetch->old_entry->releaseRequest();
                fetch->old_entry->unlock("peerDigestFetchReply 200");
                fetch->old_entry = NULL;
            }
        } else {
            /* some kind of a bug */
            peerDigestFetchAbort(fetch, buf, reply->sline.reason());
            return -1;          /* XXX -1 will abort stuff in ReadReply! */
        }

        /* must have a ready-to-use store entry if we got here */
        /* can we stay with the old in-memory digest? */
        if (status == Http::scNotModified && fetch->pd->cd) {
            peerDigestFetchStop(fetch, buf, "Not modified");
            fetch->state = DIGEST_READ_DONE;
        } else {
            fetch->state = DIGEST_READ_HEADERS;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE)
            peerDigestFetchAbort(fetch, buf, "reply header too big");
    }

    /* We don't want to actually ack that we've handled anything,
     * otherwise SwapInHeaders() won't get the reply line .. */
    return 0;
}
587 | ||
/* fetch headers from disk, pass on to SwapInCBlock
 *
 * Returns the number of header bytes consumed, 0 if more data is
 * needed, or -1 on abort.
 */
int
peerDigestSwapInHeaders(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;
    size_t hdr_size;

    assert(fetch->state == DIGEST_READ_HEADERS);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInHeaders"))
        return -1;

    assert(!fetch->offset);

    if ((hdr_size = headersEnd(buf, size))) {
        assert(fetch->entry->getReply());
        assert(fetch->entry->getReply()->sline.status() != Http::scNone);

        /* only a 200 reply should ever have been cached for the digest */
        if (fetch->entry->getReply()->sline.status() != Http::scOkay) {
            debugs(72, DBG_IMPORTANT, "peerDigestSwapInHeaders: " << fetch->pd->host <<
                   " status " << fetch->entry->getReply()->sline.status() <<
                   " got cached!");

            peerDigestFetchAbort(fetch, buf, "internal status error");
            return -1;
        }

        fetch->state = DIGEST_READ_CBLOCK;
        return hdr_size;        /* Say how much data we read */
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE) {
            peerDigestFetchAbort(fetch, buf, "stored header too big");
            return -1;
        } else {
            return 0;           /* We need to read more to parse .. */
        }
    }

    /* unreachable: both branches above return */
    fatal("peerDigestSwapInHeaders() - shouldn't get here!\n");
    return 0;                   /* keep gcc happy */
}
631 | ||
/* parse the digest control block (fixed-size header of the digest body);
 * on success switches to DIGEST_READ_MASK.
 * Returns StoreDigestCBlockSize bytes consumed, 0 if more data is
 * needed, or -1 on abort. */
int
peerDigestSwapInCBlock(void *data, char *buf, ssize_t size)
{
    DigestFetchState *fetch = (DigestFetchState *)data;

    assert(fetch->state == DIGEST_READ_CBLOCK);

    if (peerDigestFetchedEnough(fetch, buf, size, "peerDigestSwapInCBlock"))
        return -1;

    if (size >= (ssize_t)StoreDigestCBlockSize) {
        PeerDigest *pd = fetch->pd;

        assert(pd && fetch->entry->getReply());

        if (peerDigestSetCBlock(pd, buf)) {
            /* XXX: soon we will have variable header size */
            /* switch to CD buffer and fetch digest guts */
            buf = NULL;         /* local only; guards against accidental reuse below */
            assert(pd->cd->mask);
            fetch->state = DIGEST_READ_MASK;
            return StoreDigestCBlockSize;
        } else {
            peerDigestFetchAbort(fetch, buf, "invalid digest cblock");
            return -1;
        }
    } else {
        /* need more data, do we have space? */

        if (size >= SM_PAGE_SIZE) {
            peerDigestFetchAbort(fetch, buf, "digest cblock too big");
            return -1;
        } else {
            return 0;           /* We need more data */
        }
    }

    /* unreachable: both branches above return */
    fatal("peerDigestSwapInCBlock(): shouldn't get here!\n");
    return 0;                   /* keep gcc happy */
}
672 | ||
673 | int | |
674 | peerDigestSwapInMask(void *data, char *buf, ssize_t size) | |
675 | { | |
676 | DigestFetchState *fetch = (DigestFetchState *)data; | |
677 | PeerDigest *pd; | |
678 | ||
679 | pd = fetch->pd; | |
680 | assert(pd->cd && pd->cd->mask); | |
681 | ||
682 | /* | |
683 | * NOTENOTENOTENOTENOTE: buf doesn't point to pd->cd->mask anymore! | |
684 | * we need to do the copy ourselves! | |
685 | */ | |
686 | memcpy(pd->cd->mask + fetch->mask_offset, buf, size); | |
687 | ||
688 | /* NOTE! buf points to the middle of pd->cd->mask! */ | |
689 | ||
690 | if (peerDigestFetchedEnough(fetch, NULL, size, "peerDigestSwapInMask")) | |
691 | return -1; | |
692 | ||
693 | fetch->mask_offset += size; | |
694 | ||
695 | if (fetch->mask_offset >= pd->cd->mask_size) { | |
696 | debugs(72, 2, "peerDigestSwapInMask: Done! Got " << | |
697 | fetch->mask_offset << ", expected " << pd->cd->mask_size); | |
698 | assert(fetch->mask_offset == pd->cd->mask_size); | |
699 | assert(peerDigestFetchedEnough(fetch, NULL, 0, "peerDigestSwapInMask")); | |
700 | return -1; /* XXX! */ | |
701 | } else { | |
702 | /* We always read everything, so return so */ | |
703 | return size; | |
704 | } | |
705 | ||
706 | fatal("peerDigestSwapInMask(): shouldn't get here!\n"); | |
707 | return 0; /* keep gcc happy */ | |
708 | } | |
709 | ||
/* decide whether the fetch should terminate, and finish it if so.
 *
 * Checks, in order: valid pd, valid peer, swap errors, aborted entry,
 * and (on EOF, size == 0) digest completeness/usefulness. When a
 * termination reason is found, calls peerDigestReqFinish() and returns
 * non-zero; reasons marked "?!" are treated as bugs (logged at level 1).
 *
 * NOTE(review): fetch->pd->peer is dereferenced here before the
 * "!(pd = fetch->pd)" NULL check below -- if fetch->pd could really be
 * NULL, this line would crash first; confirm against cbdata semantics.
 */
static int
peerDigestFetchedEnough(DigestFetchState * fetch, char *buf, ssize_t size, const char *step_name)
{
    PeerDigest *pd = NULL;
    const char *host = "<unknown>";     /* peer host */
    const char *reason = NULL;  /* reason for completion */
    const char *no_bug = NULL;  /* successful completion if set */
    const int pdcb_valid = cbdataReferenceValid(fetch->pd);
    const int pcb_valid = cbdataReferenceValid(fetch->pd->peer);

    /* test possible exiting conditions (the same for most steps!)
     * cases marked with '?!' should not happen */

    if (!reason) {
        if (!(pd = fetch->pd))
            reason = "peer digest disappeared?!";

#if DONT /* WHY NOT? /HNO */

        else if (!cbdataReferenceValid(pd))
            reason = "invalidated peer digest?!";

#endif

        else
            host = pd->host.termedBuf();
    }

    debugs(72, 6, step_name << ": peer " << host << ", offset: " <<
           fetch->offset << " size: " << size << ".");

    /* continue checking (with pd and host known and valid) */

    if (!reason) {
        if (!cbdataReferenceValid(pd->peer))
            reason = "peer disappeared";
        else if (size < 0)
            reason = "swap failure";
        else if (!fetch->entry)
            reason = "swap aborted?!";
        else if (EBIT_TEST(fetch->entry->flags, ENTRY_ABORTED))
            reason = "swap aborted";
    }

    /* continue checking (maybe-successful eof case) */
    if (!reason && !size) {
        if (!pd->cd)
            reason = "null digest?!";
        else if (fetch->mask_offset != (int)pd->cd->mask_size)
            reason = "premature end of digest?!";
        else if (!peerDigestUseful(pd))
            reason = "useless digest";
        else
            reason = no_bug = "success";
    }

    /* finish if we have a reason */
    if (reason) {
        const int level = strstr(reason, "?!") ? 1 : 3;
        debugs(72, level, "" << step_name << ": peer " << host << ", exiting after '" << reason << "'");
        peerDigestReqFinish(fetch, buf,
                            1, pdcb_valid, pcb_valid, reason, !no_bug);
    } else {
        /* paranoid check */
        assert(pdcb_valid && pcb_valid);
    }

    return reason != NULL;
}
779 | ||
780 | /* call this when all callback data is valid and fetch must be stopped but | |
781 | * no error has occurred (e.g. we received 304 reply and reuse old digest) */ | |
782 | static void | |
783 | peerDigestFetchStop(DigestFetchState * fetch, char *buf, const char *reason) | |
784 | { | |
785 | assert(reason); | |
786 | debugs(72, 2, "peerDigestFetchStop: peer " << fetch->pd->host << ", reason: " << reason); | |
787 | peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 0); | |
788 | } | |
789 | ||
790 | /* call this when all callback data is valid but something bad happened */ | |
791 | static void | |
792 | peerDigestFetchAbort(DigestFetchState * fetch, char *buf, const char *reason) | |
793 | { | |
794 | assert(reason); | |
795 | debugs(72, 2, "peerDigestFetchAbort: peer " << fetch->pd->host << ", reason: " << reason); | |
796 | peerDigestReqFinish(fetch, buf, 1, 1, 1, reason, 1); | |
797 | } | |
798 | ||
/* complete the digest transfer, update stats, unlock/release everything
 *
 * The three *_valid flags say which cbdata handles may still be
 * dereferenced: fcb_valid (fetch), pdcb_valid (fetch->pd), pcb_valid
 * (the peer). err != 0 marks an unsuccessful fetch.
 * Note: 'buf' is not used in this function; it is kept for symmetry
 * with the Stop/Abort callers.
 */
static void
peerDigestReqFinish(DigestFetchState * fetch, char *buf,
                    int fcb_valid, int pdcb_valid, int pcb_valid,
                    const char *reason, int err)
{
    assert(reason);

    /* must go before peerDigestPDFinish */

    if (pdcb_valid) {
        fetch->pd->flags.requested = false;
        fetch->pd->req_result = reason;
    }

    /* schedule next check if peer is still out there */
    if (pcb_valid) {
        PeerDigest *pd = fetch->pd;

        if (err) {
            /* failure: back off exponentially */
            pd->times.retry_delay = peerDigestIncDelay(pd);
            peerDigestSetCheck(pd, pd->times.retry_delay);
        } else {
            /* success: reset backoff, re-check around the entry's expiry */
            pd->times.retry_delay = 0;
            peerDigestSetCheck(pd, peerDigestNewDelay(fetch->entry));
        }
    }

    /* note: order is significant */
    if (fcb_valid)
        peerDigestFetchSetStats(fetch);

    if (pdcb_valid)
        peerDigestPDFinish(fetch, pcb_valid, err);

    if (fcb_valid)
        peerDigestFetchFinish(fetch, err);
}
837 | ||
/* destroys digest if peer disappeared
 * must be called only when fetch and pd cbdata are valid
 *
 * Rolls the fetch byte/message counters into the per-peer digest
 * stats, marks the digest usable or not, and drops the fetch's
 * reference to pd (taken in peerDigestRequest()).
 */
static void
peerDigestPDFinish(DigestFetchState * fetch, int pcb_valid, int err)
{
    PeerDigest *pd = fetch->pd;
    const char *host = pd->host.termedBuf();

    pd->times.received = squid_curtime;
    pd->times.req_delay = fetch->resp_time;
    kb_incr(&pd->stats.sent.kbytes, (size_t) fetch->sent.bytes);
    kb_incr(&pd->stats.recv.kbytes, (size_t) fetch->recv.bytes);
    pd->stats.sent.msgs += fetch->sent.msg;
    pd->stats.recv.msgs += fetch->recv.msg;

    if (err) {
        debugs(72, DBG_IMPORTANT, "" << (pcb_valid ? "temporary " : "" ) << "disabling (" << pd->req_result << ") digest from " << host);

        /* drop the (possibly partial) in-core digest */
        if (pd->cd) {
            cacheDigestDestroy(pd->cd);
            pd->cd = NULL;
        }

        pd->flags.usable = false;

        if (!pcb_valid)
            peerDigestNotePeerGone(pd);
    } else {
        assert(pcb_valid);

        pd->flags.usable = true;

        /* XXX: ugly condition, but how? */

        if (fetch->entry->store_status == STORE_OK)
            debugs(72, 2, "re-used old digest from " << host);
        else
            debugs(72, 2, "received valid digest from " << host);
    }

    /* release the reference taken in peerDigestRequest() */
    cbdataReferenceDone(fetch->pd);
}
880 | ||
/* free fetch state structures
 * must be called only when fetch cbdata is valid
 *
 * Releases any leftover old store entry, folds the fetch counters
 * into the global cache-digest stats, unregisters/unlocks the new
 * entry, and frees the fetch state itself.
 */
static void
peerDigestFetchFinish(DigestFetchState * fetch, int err)
{
    assert(fetch->entry && fetch->request);

    if (fetch->old_entry) {
        debugs(72, 3, "peerDigestFetchFinish: deleting old entry");
        storeUnregister(fetch->old_sc, fetch->old_entry, fetch);
        fetch->old_entry->releaseRequest();
        fetch->old_entry->unlock("peerDigestFetchFinish old");
        fetch->old_entry = NULL;
    }

    /* update global stats */
    kb_incr(&statCounter.cd.kbytes_sent, (size_t) fetch->sent.bytes);

    kb_incr(&statCounter.cd.kbytes_recv, (size_t) fetch->recv.bytes);

    statCounter.cd.msgs_sent += fetch->sent.msg;

    statCounter.cd.msgs_recv += fetch->recv.msg;

    /* unlock everything */
    storeUnregister(fetch->sc, fetch->entry, fetch);

    fetch->entry->unlock("peerDigestFetchFinish new");

    HTTPMSGUNLOCK(fetch->request);

    fetch->entry = NULL;

    /* the pd reference must already have been released by peerDigestPDFinish() */
    assert(fetch->pd == NULL);

    cbdataFree(fetch);
}
918 | ||
919 | /* calculate fetch stats after completion */ | |
920 | static void | |
921 | peerDigestFetchSetStats(DigestFetchState * fetch) | |
922 | { | |
923 | MemObject *mem; | |
924 | assert(fetch->entry && fetch->request); | |
925 | ||
926 | mem = fetch->entry->mem_obj; | |
927 | assert(mem); | |
928 | ||
929 | /* XXX: outgoing numbers are not precise */ | |
930 | /* XXX: we must distinguish between 304 hits and misses here */ | |
931 | fetch->sent.bytes = fetch->request->prefixLen(); | |
932 | /* XXX: this is slightly wrong: we don't KNOW that the entire memobject | |
933 | * was fetched. We only know how big it is | |
934 | */ | |
935 | fetch->recv.bytes = mem->size(); | |
936 | fetch->sent.msg = fetch->recv.msg = 1; | |
937 | fetch->expires = fetch->entry->expires; | |
938 | fetch->resp_time = squid_curtime - fetch->start_time; | |
939 | ||
940 | debugs(72, 3, "peerDigestFetchFinish: recv " << fetch->recv.bytes << | |
941 | " bytes in " << (int) fetch->resp_time << " secs"); | |
942 | ||
943 | debugs(72, 3, "peerDigestFetchFinish: expires: " << | |
944 | (long int) fetch->expires << " (" << std::showpos << | |
945 | (int) (fetch->expires - squid_curtime) << "), lmt: " << | |
946 | std::noshowpos << (long int) fetch->entry->lastmod << " (" << | |
947 | std::showpos << (int) (fetch->entry->lastmod - squid_curtime) << | |
948 | ")"); | |
949 | ||
950 | } | |
951 | ||
952 | static int | |
953 | peerDigestSetCBlock(PeerDigest * pd, const char *buf) | |
954 | { | |
955 | StoreDigestCBlock cblock; | |
956 | int freed_size = 0; | |
957 | const char *host = pd->host.termedBuf(); | |
958 | ||
959 | memcpy(&cblock, buf, sizeof(cblock)); | |
960 | /* network -> host conversions */ | |
961 | cblock.ver.current = ntohs(cblock.ver.current); | |
962 | cblock.ver.required = ntohs(cblock.ver.required); | |
963 | cblock.capacity = ntohl(cblock.capacity); | |
964 | cblock.count = ntohl(cblock.count); | |
965 | cblock.del_count = ntohl(cblock.del_count); | |
966 | cblock.mask_size = ntohl(cblock.mask_size); | |
967 | debugs(72, 2, "got digest cblock from " << host << "; ver: " << | |
968 | (int) cblock.ver.current << " (req: " << (int) cblock.ver.required << | |
969 | ")"); | |
970 | ||
971 | debugs(72, 2, "\t size: " << | |
972 | cblock.mask_size << " bytes, e-cnt: " << | |
973 | cblock.count << ", e-util: " << | |
974 | xpercentInt(cblock.count, cblock.capacity) << "%" ); | |
975 | /* check version requirements (both ways) */ | |
976 | ||
977 | if (cblock.ver.required > CacheDigestVer.current) { | |
978 | debugs(72, DBG_IMPORTANT, "" << host << " digest requires version " << | |
979 | cblock.ver.required << "; have: " << CacheDigestVer.current); | |
980 | ||
981 | return 0; | |
982 | } | |
983 | ||
984 | if (cblock.ver.current < CacheDigestVer.required) { | |
985 | debugs(72, DBG_IMPORTANT, "" << host << " digest is version " << | |
986 | cblock.ver.current << "; we require: " << | |
987 | CacheDigestVer.required); | |
988 | ||
989 | return 0; | |
990 | } | |
991 | ||
992 | /* check consistency */ | |
993 | if (cblock.ver.required > cblock.ver.current || | |
994 | cblock.mask_size <= 0 || cblock.capacity <= 0 || | |
995 | cblock.bits_per_entry <= 0 || cblock.hash_func_count <= 0) { | |
996 | debugs(72, DBG_CRITICAL, "" << host << " digest cblock is corrupted."); | |
997 | return 0; | |
998 | } | |
999 | ||
1000 | /* check consistency further */ | |
1001 | if ((size_t)cblock.mask_size != cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry)) { | |
1002 | debugs(72, DBG_CRITICAL, host << " digest cblock is corrupted " << | |
1003 | "(mask size mismatch: " << cblock.mask_size << " ? " << | |
1004 | cacheDigestCalcMaskSize(cblock.capacity, cblock.bits_per_entry) | |
1005 | << ")."); | |
1006 | return 0; | |
1007 | } | |
1008 | ||
1009 | /* there are some things we cannot do yet */ | |
1010 | if (cblock.hash_func_count != CacheDigestHashFuncCount) { | |
1011 | debugs(72, DBG_CRITICAL, "" << host << " digest: unsupported #hash functions: " << | |
1012 | cblock.hash_func_count << " ? " << CacheDigestHashFuncCount << "."); | |
1013 | return 0; | |
1014 | } | |
1015 | ||
1016 | /* | |
1017 | * no cblock bugs below this point | |
1018 | */ | |
1019 | /* check size changes */ | |
1020 | if (pd->cd && cblock.mask_size != (ssize_t)pd->cd->mask_size) { | |
1021 | debugs(72, 2, host << " digest changed size: " << cblock.mask_size << | |
1022 | " -> " << pd->cd->mask_size); | |
1023 | freed_size = pd->cd->mask_size; | |
1024 | cacheDigestDestroy(pd->cd); | |
1025 | pd->cd = NULL; | |
1026 | } | |
1027 | ||
1028 | if (!pd->cd) { | |
1029 | debugs(72, 2, "creating " << host << " digest; size: " << cblock.mask_size << " (" << | |
1030 | std::showpos << (int) (cblock.mask_size - freed_size) << ") bytes"); | |
1031 | pd->cd = cacheDigestCreate(cblock.capacity, cblock.bits_per_entry); | |
1032 | ||
1033 | if (cblock.mask_size >= freed_size) | |
1034 | kb_incr(&statCounter.cd.memory, cblock.mask_size - freed_size); | |
1035 | } | |
1036 | ||
1037 | assert(pd->cd); | |
1038 | /* these assignments leave us in an inconsistent state until we finish reading the digest */ | |
1039 | pd->cd->count = cblock.count; | |
1040 | pd->cd->del_count = cblock.del_count; | |
1041 | return 1; | |
1042 | } | |
1043 | ||
1044 | static int | |
1045 | peerDigestUseful(const PeerDigest * pd) | |
1046 | { | |
1047 | /* TODO: we should calculate the prob of a false hit instead of bit util */ | |
1048 | const int bit_util = cacheDigestBitUtil(pd->cd); | |
1049 | ||
1050 | if (bit_util > 65) { | |
1051 | debugs(72, DBG_CRITICAL, "Warning: " << pd->host << | |
1052 | " peer digest has too many bits on (" << bit_util << "%%)."); | |
1053 | ||
1054 | return 0; | |
1055 | } | |
1056 | ||
1057 | return 1; | |
1058 | } | |
1059 | ||
1060 | static int | |
1061 | saneDiff(time_t diff) | |
1062 | { | |
1063 | return abs((int) diff) > squid_curtime / 2 ? 0 : diff; | |
1064 | } | |
1065 | ||
/* Render the cache-manager report for one peer's digest: hit-guess
 * accuracy, event timeline, state flags, traffic totals, and the
 * in-memory digest structure (if one is loaded). */
void
peerDigestStatsReport(const PeerDigest * pd, StoreEntry * e)
{
/* "yes"/"no" rendering of a pd->flags member */
#define f2s(flag) (pd->flags.flag ? "yes" : "no")
/* one timeline row: absolute time, offset from now, offset from init */
#define appendTime(tm) storeAppendPrintf(e, "%s\t %10ld\t %+d\t %+d\n", \
    ""#tm, (long int)pd->times.tm, \
    saneDiff(pd->times.tm - squid_curtime), \
    saneDiff(pd->times.tm - pd->times.initialized))

    assert(pd);

    const char *host = pd->host.termedBuf();
    storeAppendPrintf(e, "\npeer digest from %s\n", host);

    cacheDigestGuessStatsReport(&pd->stats.guess, e, host);

    /* lifecycle timeline of this digest */
    storeAppendPrintf(e, "\nevent\t timestamp\t secs from now\t secs from init\n");
    appendTime(initialized);
    appendTime(needed);
    appendTime(requested);
    appendTime(received);
    appendTime(next_check);

    storeAppendPrintf(e, "peer digest state:\n");
    storeAppendPrintf(e, "\tneeded: %3s, usable: %3s, requested: %3s\n",
                      f2s(needed), f2s(usable), f2s(requested));
    storeAppendPrintf(e, "\n\tlast retry delay: %d secs\n",
                      (int) pd->times.retry_delay);
    storeAppendPrintf(e, "\tlast request response time: %d secs\n",
                      (int) pd->times.req_delay);
    storeAppendPrintf(e, "\tlast request result: %s\n",
                      pd->req_result ? pd->req_result : "(none)");

    storeAppendPrintf(e, "\npeer digest traffic:\n");
    storeAppendPrintf(e, "\trequests sent: %d, volume: %d KB\n",
                      pd->stats.sent.msgs, (int) pd->stats.sent.kbytes.kb);
    storeAppendPrintf(e, "\treplies recv: %d, volume: %d KB\n",
                      pd->stats.recv.msgs, (int) pd->stats.recv.kbytes.kb);

    storeAppendPrintf(e, "\npeer digest structure:\n");

    if (pd->cd)
        cacheDigestReport(pd->cd, host, e);
    else
        storeAppendPrintf(e, "\tno in-memory copy\n");
}
1112 | ||
1113 | #endif | |
1114 |