/*
 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20 Storage Manager */

#include "squid.h"
#include "base/AsyncCbdataCalls.h"
#include "base/IoManip.h"
#include "base/PackableStream.h"
#include "base/TextException.h"
#include "CacheDigest.h"
#include "CacheManager.h"
#include "CollapsedForwarding.h"
#include "comm/Connection.h"
#include "comm/Read.h"
#include "debug/Messages.h"
#if HAVE_DISKIO_MODULE_IPCIO
#include "DiskIO/IpcIo/IpcIoFile.h"
#endif
#include "ETag.h"
#include "event.h"
#include "fde.h"
#include "globals.h"
#include "http.h"
#include "HttpReply.h"
#include "HttpRequest.h"
#include "mem_node.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mgr/Registration.h"
#include "mgr/StoreIoAction.h"
#include "repl_modules.h"
#include "RequestFlags.h"
#include "sbuf/Stream.h"
#include "SquidConfig.h"
#include "StatCounters.h"
#include "stmem.h"
#include "Store.h"
#include "store/Controller.h"
#include "store/Disk.h"
#include "store/Disks.h"
#include "store/SwapMetaOut.h"
#include "store_digest.h"
#include "store_key_md5.h"
#include "store_log.h"
#include "store_rebuild.h"
#include "StoreClient.h"
#include "StoreIOState.h"
#include "StrList.h"
#include "swap_log_op.h"
#include "tools.h"
#if USE_DELAY_POOLS
#include "DelayPools.h"
#endif

/** StoreEntry uses explicit new/delete operators, which set pool chunk size to 2MB
 * XXX: convert to MEMPROXY_CLASS() API
 */
#include "mem/Allocator.h"
#include "mem/Pool.h"

#include <climits>
#include <stack>

#define REBUILD_TIMESTAMP_DELTA_MAX 2

#define STORE_IN_MEM_BUCKETS (229)

// TODO: Convert these string constants to generated enum string-arrays

const char *memStatusStr[] = {
    "NOT_IN_MEMORY",
    "IN_MEMORY"
};

const char *pingStatusStr[] = {
    "PING_NONE",
    "PING_WAITING",
    "PING_DONE"
};

const char *storeStatusStr[] = {
    "STORE_OK",
    "STORE_PENDING"
};

const char *swapStatusStr[] = {
    "SWAPOUT_NONE",
    "SWAPOUT_WRITING",
    "SWAPOUT_DONE",
    "SWAPOUT_FAILED"
};

/*
 * This defines a repl type
 */

typedef struct _storerepl_entry storerepl_entry_t;

struct _storerepl_entry {
    const char *typestr;
    REMOVALPOLICYCREATE *create;
};

static storerepl_entry_t *storerepl_list = nullptr;

/*
 * local function prototypes
 */
static int getKeyCounter(void);
static OBJH storeCheckCachableStats;
static EVH storeLateRelease;

/*
 * local variables
 */
static std::stack<StoreEntry*> LateReleaseStack;
Mem::Allocator *StoreEntry::pool = nullptr;

void
Store::Stats(StoreEntry * output)
{
    assert(output);
    Root().stat(*output);
}

/// reports the current state of Store-related queues
static void
StatQueues(StoreEntry *e)
{
    assert(e);
    PackableStream stream(*e);
    CollapsedForwarding::StatQueue(stream);
#if HAVE_DISKIO_MODULE_IPCIO
    stream << "\n";
    IpcIoFile::StatQueue(stream);
#endif
    stream.flush();
}

// XXX: new/delete operators need to be replaced with MEMPROXY_CLASS
// definitions but doing so exposes bug 4370, and maybe 4354 and 4355
void *
StoreEntry::operator new (size_t bytecount)
{
    assert(bytecount == sizeof (StoreEntry));

    if (!pool) {
        pool = memPoolCreate ("StoreEntry", bytecount);
    }

    return pool->alloc();
}

void
StoreEntry::operator delete (void *address)
{
    pool->freeOne(address);
}
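
// Conversion sketch (illustrative, not applied; see the XXX above): the
// MEMPROXY_CLASS() API would replace both explicit operators with a single
// in-class declaration, roughly:
//
//     class StoreEntry {
//         MEMPROXY_CLASS(StoreEntry);
//         // ...
//     };
//
// Per the comment above, that conversion currently exposes bug 4370 (and
// maybe 4354 and 4355), so the explicit pool-backed operators remain.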

bool
StoreEntry::makePublic(const KeyScope scope)
{
    /* This object can be cached for a long time */
    return !EBIT_TEST(flags, RELEASE_REQUEST) && setPublicKey(scope);
}

void
StoreEntry::makePrivate(const bool shareable)
{
    releaseRequest(shareable); /* delete object when not used */
}

void
StoreEntry::clearPrivate()
{
    assert(!EBIT_TEST(flags, RELEASE_REQUEST));
    EBIT_CLR(flags, KEY_PRIVATE);
    shareableWhenPrivate = false;
}

bool
StoreEntry::cacheNegatively()
{
    /* This object may be negatively cached */
    if (makePublic()) {
        negativeCache();
        return true;
    }
    return false;
}

size_t
StoreEntry::inUseCount()
{
    if (!pool)
        return 0;
    return pool->getInUseCount();
}

const char *
StoreEntry::getMD5Text() const
{
    return storeKeyText((const cache_key *)key);
}

size_t
StoreEntry::bytesWanted (Range<size_t> const aRange, bool ignoreDelayPools) const
{
    if (mem_obj == nullptr)
        return aRange.end;

#if URL_CHECKSUM_DEBUG

    mem_obj->checkUrlChecksum();

#endif

    if (!mem_obj->readAheadPolicyCanRead())
        return 0;

    return mem_obj->mostBytesWanted(aRange.end, ignoreDelayPools);
}

bool
StoreEntry::hasParsedReplyHeader() const
{
    if (mem_obj) {
        const auto &reply = mem_obj->baseReply();
        if (reply.pstate == Http::Message::psParsed) {
            debugs(20, 7, reply.hdr_sz);
            return true;
        }
    }
    return false;
}

bool
StoreEntry::checkDeferRead(int) const
{
    return (bytesWanted(Range<size_t>(0,INT_MAX)) == 0);
}
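
// Illustrative note (an assumption about callers, not visible in this file):
// server-side readers use checkDeferRead() as their defer-read test, so when
// the read-ahead policy reports zero wanted bytes above, further socket reads
// into this entry are deferred until some client consumes buffered data.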

void
StoreEntry::setNoDelay(bool const newValue)
{
    if (mem_obj)
        mem_obj->setNoDelay(newValue);
}

// XXX: Type names mislead. STORE_DISK_CLIENT actually means that we should
// open swapin file, aggressively trim memory, and ignore read-ahead gap.
// It does not mean we will read from disk exclusively (or at all!).
// STORE_MEM_CLIENT covers all other cases, including in-memory entries,
// newly created entries, and entries not backed by disk or memory cache.
// XXX: May create STORE_DISK_CLIENT with no disk caching configured.
// XXX: Collapsed clients cannot predict their type.
store_client_t
StoreEntry::storeClientType() const
{
    /* The needed offset isn't in memory
     * XXX TODO: this is wrong for range requests
     * as the needed offset may *not* be 0, AND
     * offset 0 in the memory object is the HTTP headers.
     */

    assert(mem_obj);

    debugs(20, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);

    if (mem_obj->inmem_lo)
        return STORE_DISK_CLIENT;

    if (EBIT_TEST(flags, ENTRY_ABORTED)) {
        /* I don't think we should be adding clients to aborted entries */
        debugs(20, DBG_IMPORTANT, "storeClientType: adding to ENTRY_ABORTED entry");
        return STORE_MEM_CLIENT;
    }

    if (swapoutFailed())
        return STORE_MEM_CLIENT;

    if (store_status == STORE_OK) {
        /* the object has completed. */

        if (mem_obj->inmem_lo == 0 && !isEmpty()) {
            if (swappedOut()) {
                debugs(20,7, mem_obj << " lo: " << mem_obj->inmem_lo << " hi: " << mem_obj->endOffset() << " size: " << mem_obj->object_sz);
                if (mem_obj->endOffset() == mem_obj->object_sz) {
                    /* hot object fully swapped in (XXX: or swapped out?) */
                    return STORE_MEM_CLIENT;
                }
            } else {
                /* Memory-only, or currently being swapped out */
                return STORE_MEM_CLIENT;
            }
        }
        debugs(20, 7, "STORE_OK STORE_DISK_CLIENT");
        return STORE_DISK_CLIENT;
    }

    /* here and past, entry is STORE_PENDING */
    /*
     * If this is the first client, let it be the mem client
     */
    if (mem_obj->nclients == 0)
        return STORE_MEM_CLIENT;

    /*
     * If there is no disk file to open yet, we must make this a
     * mem client. If we can't open the swapin file before writing
     * to the client, there is no guarantee that we will be able
     * to open it later when we really need it.
     */
    if (swap_status == SWAPOUT_NONE)
        return STORE_MEM_CLIENT;

    // TODO: The above "must make this a mem client" logic contradicts "Slight
    // weirdness" logic in store_client::doCopy() that converts hits to misses
    // on startSwapin() failures. We should probably attempt to open a swapin
    // file _here_ instead (and avoid STORE_DISK_CLIENT designation for clients
    // that fail to do so). That would also address a similar problem with Rock
    // store that does not yet support swapin during SWAPOUT_WRITING.

    /*
     * otherwise, make subsequent clients read from disk so they
     * can not delay the first, and vice-versa.
     */
    debugs(20, 7, "STORE_PENDING STORE_DISK_CLIENT");
    return STORE_DISK_CLIENT;
}

StoreEntry::StoreEntry() :
    mem_obj(nullptr),
    timestamp(-1),
    lastref(-1),
    expires(-1),
    lastModified_(-1),
    swap_file_sz(0),
    refcount(0),
    flags(0),
    swap_filen(-1),
    swap_dirn(-1),
    mem_status(NOT_IN_MEMORY),
    ping_status(PING_NONE),
    store_status(STORE_PENDING),
    swap_status(SWAPOUT_NONE),
    lock_count(0),
    shareableWhenPrivate(false)
{
    debugs(20, 5, "StoreEntry constructed, this=" << this);
}

StoreEntry::~StoreEntry()
{
    debugs(20, 5, "StoreEntry destructed, this=" << this);
}

#if USE_ADAPTATION
void
StoreEntry::deferProducer(const AsyncCall::Pointer &producer)
{
    if (!deferredProducer)
        deferredProducer = producer;
    else
        debugs(20, 5, "Deferred producer call is already set to: " <<
               *deferredProducer << ", requested call: " << *producer);
}

void
StoreEntry::kickProducer()
{
    if (deferredProducer != nullptr) {
        ScheduleCallHere(deferredProducer);
        deferredProducer = nullptr;
    }
}
#endif

void
StoreEntry::destroyMemObject()
{
    debugs(20, 3, mem_obj << " in " << *this);

    if (hasTransients())
        Store::Root().transientsDisconnect(*this);
    if (hasMemStore())
        Store::Root().memoryDisconnect(*this);

    if (auto memObj = mem_obj) {
        setMemStatus(NOT_IN_MEMORY);
        mem_obj = nullptr;
        delete memObj;
    }
}

void
destroyStoreEntry(void *data)
{
    debugs(20, 3, "destroyStoreEntry: destroying " << data);
    StoreEntry *e = static_cast<StoreEntry *>(static_cast<hash_link *>(data));
    assert(e != nullptr);

    if (e->hasDisk())
        e->disk().disconnect(*e);

    e->destroyMemObject();

    e->hashDelete();

    assert(e->key == nullptr);

    delete e;
}

/* ----- INTERFACE BETWEEN STORAGE MANAGER AND HASH TABLE FUNCTIONS --------- */

void
StoreEntry::hashInsert(const cache_key * someKey)
{
    debugs(20, 3, "StoreEntry::hashInsert: Inserting Entry " << *this << " key '" << storeKeyText(someKey) << "'");
    assert(!key);
    key = storeKeyDup(someKey);
    hash_join(store_table, this);
}

void
StoreEntry::hashDelete()
{
    if (key) { // some test cases do not create keys and do not hashInsert()
        hash_remove_link(store_table, this);
        storeKeyFree((const cache_key *)key);
        key = nullptr;
    }
}

/* -------------------------------------------------------------------------- */

void
StoreEntry::lock(const char *context)
{
    ++lock_count;
    debugs(20, 3, context << " locked key " << getMD5Text() << ' ' << *this);
}

void
StoreEntry::touch()
{
    lastref = squid_curtime;
}

void
StoreEntry::releaseRequest(const bool shareable)
{
    debugs(20, 3, shareable << ' ' << *this);
    if (!shareable)
        shareableWhenPrivate = false; // may already be false
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return;
    setPrivateKey(shareable, true);
}

int
StoreEntry::unlock(const char *context)
{
    debugs(20, 3, (context ? context : "somebody") <<
           " unlocking key " << getMD5Text() << ' ' << *this);
    assert(lock_count > 0);
    --lock_count;

    if (lock_count)
        return (int) lock_count;

    abandon(context);
    return 0;
}

/// keep the unlocked StoreEntry object in the local store_table (if needed) or
/// delete it (otherwise)
void
StoreEntry::doAbandon(const char *context)
{
    debugs(20, 5, *this << " via " << (context ? context : "somebody"));
    assert(!locked());
    assert(storePendingNClients(this) == 0);

    // Both aborted local writers and aborted local readers (of remote writers)
    // are STORE_PENDING, but aborted readers should never release().
    if (EBIT_TEST(flags, RELEASE_REQUEST) ||
            (store_status == STORE_PENDING && !Store::Root().transientsReader(*this))) {
        this->release();
        return;
    }

    Store::Root().handleIdleEntry(*this); // may delete us
}

StoreEntry *
storeGetPublic(const char *uri, const HttpRequestMethod& method)
{
    return Store::Root().find(storeKeyPublic(uri, method));
}

StoreEntry *
storeGetPublicByRequestMethod(HttpRequest * req, const HttpRequestMethod& method, const KeyScope keyScope)
{
    return Store::Root().find(storeKeyPublicByRequestMethod(req, method, keyScope));
}

StoreEntry *
storeGetPublicByRequest(HttpRequest * req, const KeyScope keyScope)
{
    StoreEntry *e = storeGetPublicByRequestMethod(req, req->method, keyScope);

    if (e == nullptr && req->method == Http::METHOD_HEAD)
        /* We can generate a HEAD reply from a cached GET object */
        e = storeGetPublicByRequestMethod(req, Http::METHOD_GET, keyScope);

    return e;
}

static int
getKeyCounter(void)
{
    static int key_counter = 0;

    if (++key_counter < 0)
        key_counter = 1;

    return key_counter;
}

/* RBC 20050104 AFAICT this should become simpler:
 * rather than reinserting with a special key it should be marked
 * as 'released' and then cleaned up when refcounting indicates.
 * the StoreHashIndex could well implement its 'released' in the
 * current manner.
 * Also, clean log writing should skip over ia,t
 * Otherwise, we need a 'remove from the index but not the store'
 * concept.
 */
void
StoreEntry::setPrivateKey(const bool shareable, const bool permanent)
{
    debugs(20, 3, shareable << permanent << ' ' << *this);
    if (permanent)
        EBIT_SET(flags, RELEASE_REQUEST); // may already be set
    if (!shareable)
        shareableWhenPrivate = false; // may already be false

    if (EBIT_TEST(flags, KEY_PRIVATE))
        return;

    if (key) {
        Store::Root().evictCached(*this); // all caches/workers will know
        hashDelete();
    }

    if (mem_obj && mem_obj->hasUris())
        mem_obj->id = getKeyCounter();
    const cache_key *newkey = storeKeyPrivate();

    assert(hash_lookup(store_table, newkey) == nullptr);
    EBIT_SET(flags, KEY_PRIVATE);
    shareableWhenPrivate = shareable;
    hashInsert(newkey);
}

bool
StoreEntry::setPublicKey(const KeyScope scope)
{
    debugs(20, 3, *this);
    if (key && !EBIT_TEST(flags, KEY_PRIVATE))
        return true; // already public

    assert(mem_obj);

    /*
     * We can't make RELEASE_REQUEST objects public. Depending on
     * when RELEASE_REQUEST gets set, we might not be swapping out
     * the object. If we're not swapping out, then subsequent
     * store clients won't be able to access object data which has
     * been freed from memory.
     *
     * If RELEASE_REQUEST is set, setPublicKey() should not be called.
     */

    assert(!EBIT_TEST(flags, RELEASE_REQUEST));

    try {
        EntryGuard newVaryMarker(adjustVary(), "setPublicKey+failure");
        const cache_key *pubKey = calcPublicKey(scope);
        Store::Root().addWriting(this, pubKey);
        forcePublicKey(pubKey);
        newVaryMarker.unlockAndReset("setPublicKey+success");
        return true;
    } catch (const std::exception &ex) {
        debugs(20, 2, "for " << *this << " failed: " << ex.what());
    }
    return false;
}

void
StoreEntry::clearPublicKeyScope()
{
    if (!key || EBIT_TEST(flags, KEY_PRIVATE))
        return; // probably the old public key was deleted or made private

    // TODO: adjustVary() when collapsed revalidation supports that

    const cache_key *newKey = calcPublicKey(ksDefault);
    if (!storeKeyHashCmp(key, newKey))
        return; // probably another collapsed revalidation beat us to this change

    forcePublicKey(newKey);
}

/// Unconditionally sets public key for this store entry.
/// Releases the old entry with the same public key (if any).
void
StoreEntry::forcePublicKey(const cache_key *newkey)
{
    debugs(20, 3, storeKeyText(newkey) << " for " << *this);
    assert(mem_obj);

    if (StoreEntry *e2 = (StoreEntry *)hash_lookup(store_table, newkey)) {
        assert(e2 != this);
        debugs(20, 3, "releasing clashing " << *e2);
        e2->release(true);
    }

    if (key)
        hashDelete();

    clearPrivate();

    assert(mem_obj->hasUris());
    hashInsert(newkey);

    if (hasDisk())
        storeDirSwapLog(this, SWAP_LOG_ADD);
}

/// Calculates correct public key for feeding forcePublicKey().
/// Assumes adjustVary() has been called for this entry already.
const cache_key *
StoreEntry::calcPublicKey(const KeyScope keyScope)
{
    assert(mem_obj);
    return mem_obj->request ? storeKeyPublicByRequest(mem_obj->request.getRaw(), keyScope) :
           storeKeyPublic(mem_obj->storeId(), mem_obj->method, keyScope);
}

/// Updates mem_obj->request->vary_headers to reflect the current Vary.
/// The vary_headers field is used to calculate the Vary marker key.
/// Releases the old Vary marker with an outdated key (if any).
/// \returns new (locked) Vary marker StoreEntry or, if none was needed, nil
/// \throws std::exception on failures
StoreEntry *
StoreEntry::adjustVary()
{
    assert(mem_obj);

    if (!mem_obj->request)
        return nullptr;

    HttpRequestPointer request(mem_obj->request);
    const auto &reply = mem_obj->freshestReply();

    if (mem_obj->vary_headers.isEmpty()) {
        /* First handle the case where the object no longer varies */
        request->vary_headers.clear();
    } else {
        if (!request->vary_headers.isEmpty() && request->vary_headers.cmp(mem_obj->vary_headers) != 0) {
            /* Oops.. the variance has changed. Kill the base object
             * to record the new variance key
             */
            request->vary_headers.clear(); /* free old "bad" variance key */
            if (StoreEntry *pe = storeGetPublic(mem_obj->storeId(), mem_obj->method))
                pe->release(true);
        }

        /* Make sure the request knows the variance status */
        if (request->vary_headers.isEmpty())
            request->vary_headers = httpMakeVaryMark(request.getRaw(), &reply);
    }

    // TODO: storeGetPublic() calls below may create unlocked entries.
    // We should add/use storeHas() API or lock/unlock those entries.
    if (!mem_obj->vary_headers.isEmpty() && !storeGetPublic(mem_obj->storeId(), mem_obj->method)) {
        /* Create "vary" base object */
        StoreEntry *pe = storeCreateEntry(mem_obj->storeId(), mem_obj->logUri(), request->flags, request->method);
        // XXX: storeCreateEntry() already tries to make `pe` public under
        // certain conditions. If those conditions do not apply to Vary markers,
        // then refactor to call storeCreatePureEntry() above. Otherwise,
        // refactor to simply check whether `pe` is already public below.
        if (!pe->makePublic()) {
            pe->unlock("StoreEntry::adjustVary+failed_makePublic");
            throw TexcHere("failed to make Vary marker public");
        }
        /* We are allowed to do this typecast */
        const HttpReplyPointer rep(new HttpReply);
        rep->setHeaders(Http::scOkay, "Internal marker object", "x-squid-internal/vary", -1, -1, squid_curtime + 100000);
        auto vary = reply.header.getList(Http::HdrType::VARY);

        if (vary.size()) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::VARY, vary.termedBuf());
            vary.clean();
        }

#if X_ACCELERATOR_VARY
        vary = reply.header.getList(Http::HdrType::HDR_X_ACCELERATOR_VARY);

        if (vary.size() > 0) {
            /* Again, we own this structure layout */
            rep->header.putStr(Http::HdrType::HDR_X_ACCELERATOR_VARY, vary.termedBuf());
            vary.clean();
        }

#endif
        pe->replaceHttpReply(rep, false); // no write until timestampsSet()

        pe->timestampsSet();

        pe->startWriting(); // after timestampsSet()

        pe->completeSuccessfully("wrote the entire Vary marker object");

        return pe;
    }
    return nullptr;
}

StoreEntry *
storeCreatePureEntry(const char *url, const char *log_url, const HttpRequestMethod& method)
{
    StoreEntry *e = nullptr;
    debugs(20, 3, "storeCreateEntry: '" << url << "'");

    e = new StoreEntry();
    e->createMemObject(url, log_url, method);

    e->store_status = STORE_PENDING;
    e->refcount = 0;
    e->lastref = squid_curtime;
    e->timestamp = -1; /* set in StoreEntry::timestampsSet() */
    e->ping_status = PING_NONE;
    EBIT_SET(e->flags, ENTRY_VALIDATED);
    return e;
}

StoreEntry *
storeCreateEntry(const char *url, const char *logUrl, const RequestFlags &flags, const HttpRequestMethod& method)
{
    StoreEntry *e = storeCreatePureEntry(url, logUrl, method);
    e->lock("storeCreateEntry");

    if (!neighbors_do_private_keys && flags.hierarchical && flags.cachable && e->setPublicKey())
        return e;

    e->setPrivateKey(false, !flags.cachable);
    return e;
}

/* Mark object as expired */
void
StoreEntry::expireNow()
{
    debugs(20, 3, "StoreEntry::expireNow: '" << getMD5Text() << "'");
    expires = squid_curtime;
}

void
StoreEntry::write (StoreIOBuffer writeBuffer)
{
    assert(mem_obj != nullptr);
    /* This assert will change when we teach the store to update */
    assert(store_status == STORE_PENDING);

    // XXX: caller uses content offset, but we also store headers
    writeBuffer.offset += mem_obj->baseReply().hdr_sz;

    debugs(20, 5, "storeWrite: writing " << writeBuffer.length << " bytes for '" << getMD5Text() << "'");
    storeGetMemSpace(writeBuffer.length);
    mem_obj->write(writeBuffer);

    if (EBIT_TEST(flags, ENTRY_FWD_HDR_WAIT) && !mem_obj->readAheadPolicyCanRead()) {
        debugs(20, 3, "allow Store clients to get entry content after buffering too much for " << *this);
        EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);
    }

    invokeHandlers();
}

/* Append incoming data from a primary server to an entry. */
void
StoreEntry::append(char const *buf, int len)
{
    assert(mem_obj != nullptr);
    assert(len >= 0);
    assert(store_status == STORE_PENDING);

    StoreIOBuffer tempBuffer;
    tempBuffer.data = (char *)buf;
    tempBuffer.length = len;
    /*
     * XXX sigh, offset might be < 0 here, but it gets "corrected"
     * later. This offset crap is such a mess.
     */
    tempBuffer.offset = mem_obj->endOffset() - mem_obj->baseReply().hdr_sz;
    write(tempBuffer);
}

void
StoreEntry::vappendf(const char *fmt, va_list vargs)
{
    LOCAL_ARRAY(char, buf, 4096);
    *buf = 0;
    int x;

    va_list ap;
    /* Fix of bug 753r. The value of vargs is undefined
     * after vsnprintf() returns. Make a copy of vargs
     * in case we loop around and call vsnprintf() again.
     */
    va_copy(ap,vargs);
    errno = 0;
    if ((x = vsnprintf(buf, sizeof(buf), fmt, ap)) < 0) {
        fatal(xstrerr(errno));
        return;
    }
    va_end(ap);

    if (x < static_cast<int>(sizeof(buf))) {
        append(buf, x);
        return;
    }

    // okay, do it the slow way.
    char *buf2 = new char[x+1];
    int y = vsnprintf(buf2, x+1, fmt, vargs);
    assert(y >= 0 && y == x);
    append(buf2, y);
    delete[] buf2;
}

// deprecated. use StoreEntry::appendf() instead.
void
storeAppendPrintf(StoreEntry * e, const char *fmt,...)
{
    va_list args;
    va_start(args, fmt);
    e->vappendf(fmt, args);
    va_end(args);
}
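
// Usage sketch (illustrative): cache manager reporters build text output
// directly into an entry, e.g.
//     storeAppendPrintf(sentry, "no.too_big\t%d\n", counter);
// Output up to the 4 KB stack buffer is formatted in vappendf()'s first
// vsnprintf() pass; anything larger takes the heap-allocated second pass.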

// deprecated. use StoreEntry::appendf() instead.
void
storeAppendVPrintf(StoreEntry * e, const char *fmt, va_list vargs)
{
    e->vappendf(fmt, vargs);
}

struct _store_check_cachable_hist {

    struct {
        int not_entry_cachable;
        int wrong_content_length;
        int too_big;
        int too_small;
        int private_key;
        int too_many_open_files;
        int too_many_open_fds;
        int missing_parts;
    } no;

    struct {
        int Default;
    } yes;
} store_check_cachable_hist;

int
storeTooManyDiskFilesOpen(void)
{
    if (Config.max_open_disk_fds == 0)
        return 0;

    if (store_open_disk_fd > Config.max_open_disk_fds)
        return 1;

    return 0;
}
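
// Config sketch (illustrative): with `max_open_disk_fds 100` in squid.conf,
// this returns 1 once more than 100 disk files are open, and checkCachable()
// then rejects new objects with "too many disk files open". A value of 0
// (the default) disables the check entirely.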

int
StoreEntry::checkTooSmall()
{
    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return 0;

    if (STORE_OK == store_status)
        if (mem_obj->object_sz >= 0 &&
                mem_obj->object_sz < Config.Store.minObjectSize)
            return 1;

    const auto clen = mem().baseReply().content_length;
    if (clen >= 0 && clen < Config.Store.minObjectSize)
        return 1;
    return 0;
}

bool
StoreEntry::checkTooBig() const
{
    if (mem_obj->endOffset() > store_maxobjsize)
        return true;

    const auto clen = mem_obj->baseReply().content_length;
    return (clen >= 0 && clen > store_maxobjsize);
}

// TODO: move "too many open..." checks outside -- we are called too early/late
bool
StoreEntry::checkCachable()
{
    // XXX: This method is used for both memory and disk caches, but some
    // checks are specific to disk caches. Move them to mayStartSwapOut().

    // XXX: This method may be called several times, sometimes with different
    // outcomes, making store_check_cachable_hist counters misleading.

    // check this first to optimize handling of repeated calls for uncachables
    if (EBIT_TEST(flags, RELEASE_REQUEST)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: not cachable");
        ++store_check_cachable_hist.no.not_entry_cachable; // TODO: rename?
        return 0; // avoid rerequesting release below
    }

    if (EBIT_TEST(flags, ENTRY_BAD_LENGTH)) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: wrong content-length");
        ++store_check_cachable_hist.no.wrong_content_length;
    } else if (!mem_obj) {
        // XXX: In bug 4131, we forgetHit() without mem_obj, so we need
        // this segfault protection, but how can we get such a HIT?
        debugs(20, 2, "StoreEntry::checkCachable: NO: missing parts: " << *this);
        ++store_check_cachable_hist.no.missing_parts;
    } else if (checkTooBig()) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too big");
        ++store_check_cachable_hist.no.too_big;
    } else if (checkTooSmall()) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too small");
        ++store_check_cachable_hist.no.too_small;
    } else if (EBIT_TEST(flags, KEY_PRIVATE)) {
        debugs(20, 3, "StoreEntry::checkCachable: NO: private key");
        ++store_check_cachable_hist.no.private_key;
    } else if (hasDisk()) {
        /*
         * the remaining cases are only relevant if we haven't
         * started swapping out the object yet.
         */
        return 1;
    } else if (storeTooManyDiskFilesOpen()) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too many disk files open");
        ++store_check_cachable_hist.no.too_many_open_files;
    } else if (fdNFree() < RESERVED_FD) {
        debugs(20, 2, "StoreEntry::checkCachable: NO: too many FD's open");
        ++store_check_cachable_hist.no.too_many_open_fds;
    } else {
        ++store_check_cachable_hist.yes.Default;
        return 1;
    }

    releaseRequest();
    return 0;
}

void
storeCheckCachableStats(StoreEntry *sentry)
{
    storeAppendPrintf(sentry, "Category\t Count\n");
    storeAppendPrintf(sentry, "no.not_entry_cachable\t%d\n",
                      store_check_cachable_hist.no.not_entry_cachable);
    storeAppendPrintf(sentry, "no.wrong_content_length\t%d\n",
                      store_check_cachable_hist.no.wrong_content_length);
    storeAppendPrintf(sentry, "no.negative_cached\t%d\n",
                      0); // TODO: Remove this backward compatibility hack.
    storeAppendPrintf(sentry, "no.missing_parts\t%d\n",
                      store_check_cachable_hist.no.missing_parts);
    storeAppendPrintf(sentry, "no.too_big\t%d\n",
                      store_check_cachable_hist.no.too_big);
    storeAppendPrintf(sentry, "no.too_small\t%d\n",
                      store_check_cachable_hist.no.too_small);
    storeAppendPrintf(sentry, "no.private_key\t%d\n",
                      store_check_cachable_hist.no.private_key);
    storeAppendPrintf(sentry, "no.too_many_open_files\t%d\n",
                      store_check_cachable_hist.no.too_many_open_files);
    storeAppendPrintf(sentry, "no.too_many_open_fds\t%d\n",
                      store_check_cachable_hist.no.too_many_open_fds);
    storeAppendPrintf(sentry, "yes.default\t%d\n",
                      store_check_cachable_hist.yes.Default);
}

void
StoreEntry::lengthWentBad(const char *reason)
{
    debugs(20, 3, "because " << reason << ": " << *this);
    EBIT_SET(flags, ENTRY_BAD_LENGTH);
    releaseRequest();
}

void
StoreEntry::completeSuccessfully(const char * const whyWeAreSure)
{
    debugs(20, 3, whyWeAreSure << "; " << *this);
    complete();
}

void
StoreEntry::completeTruncated(const char * const truncationReason)
{
    lengthWentBad(truncationReason);
    complete();
}

void
StoreEntry::complete()
{
    debugs(20, 3, "storeComplete: '" << getMD5Text() << "'");

    // To preserve forwarding retries, call FwdState::complete() instead.
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    if (store_status != STORE_PENDING) {
        /*
         * if we're not STORE_PENDING, then probably we got aborted
         * and there should be NO clients on this entry
         */
        assert(EBIT_TEST(flags, ENTRY_ABORTED));
        assert(mem_obj->nclients == 0);
        return;
    }

    mem_obj->object_sz = mem_obj->endOffset();

    store_status = STORE_OK;

    assert(mem_status == NOT_IN_MEMORY);

    if (!EBIT_TEST(flags, ENTRY_BAD_LENGTH) && !validLength())
        lengthWentBad("!validLength() in complete()");

#if USE_CACHE_DIGESTS
    if (mem_obj->request)
        mem_obj->request->hier.store_complete_stop = current_time;

#endif
    /*
     * We used to call invokeHandlers, then storeSwapOut. However,
     * Madhukar Reddy <myreddy@persistence.com> reported that
     * responses without content length would sometimes get released
     * in client_side, thinking that the response is incomplete.
     */
    invokeHandlers();
}

/*
 * Someone wants to abort this transfer. Set the reason in the
 * request structure, call the callback and mark the
 * entry for releasing
 */
void
StoreEntry::abort()
{
    ++statCounter.aborted_requests;
    assert(store_status == STORE_PENDING);
    assert(mem_obj != nullptr);
    debugs(20, 6, "storeAbort: " << getMD5Text());

    lock("StoreEntry::abort"); /* lock while aborting */
    negativeCache();

    releaseRequest();

    EBIT_SET(flags, ENTRY_ABORTED);

    // allow the Store clients to be told about the problem
    EBIT_CLR(flags, ENTRY_FWD_HDR_WAIT);

    setMemStatus(NOT_IN_MEMORY);

    store_status = STORE_OK;

    /* Notify the server side */

    if (mem_obj->abortCallback) {
        ScheduleCallHere(mem_obj->abortCallback);
        mem_obj->abortCallback = nullptr;
    }

    /* XXX Should we reverse these two, so that there is no
     * unneeded disk swapping triggered?
     */
    /* Notify the client side */
    invokeHandlers();

    // abort swap out, invalidating what was created so far (release follows)
    swapOutFileClose(StoreIOState::writerGone);

    unlock("StoreEntry::abort"); /* unlock */
}

/**
 * Clear Memory storage to accommodate the given object len
 */
void
storeGetMemSpace(int size)
{
    Store::Root().freeMemorySpace(size);
}

/* thunk through to Store::Root().maintain(). Note that this would be better still
 * if registered against the root store itself, but that requires more complex
 * update logic - bigger fish to fry first. Long term each store when
 * it becomes active will self register
 */
void
Store::Maintain(void *)
{
    Store::Root().maintain();

    /* Reregister a maintain event .. */
    eventAdd("MaintainSwapSpace", Maintain, nullptr, 1.0, 1);

}

/* The maximum objects to scan for maintain storage space */
#define MAINTAIN_MAX_SCAN 1024
#define MAINTAIN_MAX_REMOVE 64

void
StoreEntry::release(const bool shareable)
{
    debugs(20, 3, shareable << ' ' << *this << ' ' << getMD5Text());
    /* If, for any reason we can't discard this object because of an
     * outstanding request, mark it for pending release */

    if (locked()) {
        releaseRequest(shareable);
        return;
    }

    if (Store::Controller::store_dirs_rebuilding && hasDisk()) {
        /* TODO: Teach disk stores to handle releases during rebuild instead. */

        // lock the entry until rebuilding is done
        lock("storeLateRelease");
        releaseRequest(shareable);
        LateReleaseStack.push(this);
        return;
    }

    storeLog(STORE_LOG_RELEASE, this);
    Store::Root().evictCached(*this);
    destroyStoreEntry(static_cast<hash_link *>(this));
}

static void
storeLateRelease(void *)
{
    StoreEntry *e;
    static int n = 0;

    if (Store::Controller::store_dirs_rebuilding) {
        eventAdd("storeLateRelease", storeLateRelease, nullptr, 1.0, 1);
        return;
    }

    // TODO: this works but looks inelegant.
    for (int i = 0; i < 10; ++i) {
        if (LateReleaseStack.empty()) {
            debugs(20, Important(30), "storeLateRelease: released " << n << " objects");
            return;
        } else {
            e = LateReleaseStack.top();
            LateReleaseStack.pop();
        }

        e->unlock("storeLateRelease");
        ++n;
    }

    eventAdd("storeLateRelease", storeLateRelease, nullptr, 0.0, 1);
}

/// whether the base response has all the body bytes we expect
/// \returns true for responses with unknown/unspecified body length
/// \returns true for responses with the right number of accumulated body bytes
bool
StoreEntry::validLength() const
{
    int64_t diff;
    assert(mem_obj != nullptr);
    const auto reply = &mem_obj->baseReply();
    debugs(20, 3, "storeEntryValidLength: Checking '" << getMD5Text() << "'");
    debugs(20, 5, "storeEntryValidLength: object_len = " <<
           objectLen());
    debugs(20, 5, "storeEntryValidLength: hdr_sz = " << reply->hdr_sz);
    debugs(20, 5, "storeEntryValidLength: content_length = " << reply->content_length);

    if (reply->content_length < 0) {
        debugs(20, 5, "storeEntryValidLength: Unspecified content length: " << getMD5Text());
        return 1;
    }

    if (reply->hdr_sz == 0) {
        debugs(20, 5, "storeEntryValidLength: Zero header size: " << getMD5Text());
        return 1;
    }

    if (mem_obj->method == Http::METHOD_HEAD) {
        debugs(20, 5, "storeEntryValidLength: HEAD request: " << getMD5Text());
        return 1;
    }

    if (reply->sline.status() == Http::scNotModified)
        return 1;

    if (reply->sline.status() == Http::scNoContent)
        return 1;

    diff = reply->hdr_sz + reply->content_length - objectLen();

    if (diff == 0)
        return 1;

    debugs(20, 3, "storeEntryValidLength: " << (diff < 0 ? -diff : diff) << " bytes too " << (diff < 0 ? "big" : "small") <<"; '" << getMD5Text() << "'" );

    return 0;
}
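
// Worked example (illustrative): for a 200 reply with hdr_sz = 300 and
// Content-Length: 1000, objectLen() should reach 1300 by completion; if only
// 1200 bytes accumulated, diff = 100 and the entry is reported as "100 bytes
// too small", so complete() marks it ENTRY_BAD_LENGTH via lengthWentBad().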

static void
storeRegisterWithCacheManager(void)
{
    Mgr::RegisterAction("storedir", "Store Directory Stats", Store::Stats, 0, 1);
    Mgr::RegisterAction("store_io", "Store IO Interface Stats", &Mgr::StoreIoAction::Create, 0, 1);
    Mgr::RegisterAction("store_check_cachable_stats", "storeCheckCachable() Stats",
                        storeCheckCachableStats, 0, 1);
    Mgr::RegisterAction("store_queues", "SMP Transients and Caching Queues", StatQueues, 0, 1);
}

void
storeInit(void)
{
    mem_policy = createRemovalPolicy(Config.memPolicy);
    storeDigestInit();
    storeLogOpen();
    eventAdd("storeLateRelease", storeLateRelease, nullptr, 1.0, 1);
    Store::Root().init();
    storeRebuildStart();

    storeRegisterWithCacheManager();
}

void
storeConfigure(void)
{
    Store::Root().configure();
}

bool
StoreEntry::memoryCachable()
{
    if (!checkCachable())
        return 0;

    if (shutting_down)
        return 0; // avoid heavy optional work during shutdown

    if (mem_obj == nullptr)
        return 0;

    if (mem_obj->data_hdr.size() == 0)
        return 0;

    if (mem_obj->inmem_lo != 0)
        return 0;

    if (!Config.onoff.memory_cache_first && swappedOut() && refcount == 1)
        return 0;

    return 1;
}

int
StoreEntry::checkNegativeHit() const
{
    if (!EBIT_TEST(flags, ENTRY_NEGCACHED))
        return 0;

    if (expires <= squid_curtime)
        return 0;

    if (store_status != STORE_OK)
        return 0;

    return 1;
}

/**
 * Set object for negative caching.
 * Preserves any expiry information given by the server.
 * In the absence of proper expiry info, the object is set to expire
 * immediately; with HTTP violations enabled, the configured negative
 * TTL is observed instead.
 */
void
StoreEntry::negativeCache()
{
    // XXX: should make the default for expires 0 instead of -1
    // so we can distinguish "Expires: -1" from nothing.
    if (expires <= 0)
#if USE_HTTP_VIOLATIONS
        expires = squid_curtime + Config.negativeTtl;
#else
        expires = squid_curtime;
#endif
    if (expires > squid_curtime) {
        EBIT_SET(flags, ENTRY_NEGCACHED);
        debugs(20, 6, "expires = " << expires << " +" << (expires-squid_curtime) << ' ' << *this);
    }
}
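
// Example (illustrative): with HTTP violations compiled in and
// `negative_ttl 60 seconds` configured, an error reply without expiry info
// gets expires = squid_curtime + 60 and is flagged ENTRY_NEGCACHED, so
// checkNegativeHit() treats it as a negative hit for the next minute.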

int
expiresMoreThan(time_t expires, time_t when)
{
    if (expires < 0) /* No Expires given */
        return 1;

    return (expires > (squid_curtime + when));
}

int
StoreEntry::validToSend() const
{
    if (EBIT_TEST(flags, RELEASE_REQUEST))
        return 0;

    if (EBIT_TEST(flags, ENTRY_NEGCACHED))
        if (expires <= squid_curtime)
            return 0;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return 0;

    // now check that the entry has a cache backing or is collapsed
    if (hasDisk()) // backed by a disk cache
        return 1;

    if (swappingOut()) // will be backed by a disk cache
        return 1;

    if (!mem_obj) // not backed by a memory cache and not collapsed
        return 0;

    // StoreEntry::storeClientType() assumes DISK_CLIENT here, but there is no
    // disk cache backing that store_client constructor will assert. XXX: This
    // is wrong for range requests (that could feed off nibbled memory) and for
    // entries backed by the shared memory cache (that could, in theory, get
    // nibbled bytes from that cache, but there is no such "memoryIn" code).
    if (mem_obj->inmem_lo) // in memory cache, but got nibbled at
        return 0;

    // The following check is correct but useless at this position. TODO: Move
    // it up when the shared memory cache can either replenish locally nibbled
    // bytes or, better, does not use local RAM copy at all.
    // if (mem_obj->memCache.index >= 0) // backed by a shared memory cache
    //     return 1;

    return 1;
}

bool
StoreEntry::timestampsSet()
{
    debugs(20, 7, *this << " had " << describeTimestamps());

    // TODO: Remove change-reducing "&" before the official commit.
    const auto reply = &mem().freshestReply();

    time_t served_date = reply->date;
    int age = reply->header.getInt(Http::HdrType::AGE);
    /* Compute the timestamp, mimicking RFC2616 section 13.2.3. */
    /* make sure that 0 <= served_date <= squid_curtime */

    if (served_date < 0 || served_date > squid_curtime)
        served_date = squid_curtime;

    /* Bug 1791:
     * If the returned Date: is more than 24 hours older than
     * the squid_curtime, then one of us needs to use NTP to set our
     * clock. We'll pretend that our clock is right.
     */
    else if (served_date < (squid_curtime - 24 * 60 * 60) )
        served_date = squid_curtime;

    /*
     * Compensate with Age header if origin server clock is ahead
     * of us and there is a cache in between us and the origin
     * server. But DONT compensate if the age value is larger than
     * squid_curtime because it results in a negative served_date.
     */
    if (age > squid_curtime - served_date)
        if (squid_curtime > age)
            served_date = squid_curtime - age;

    // compensate for Squid-to-server and server-to-Squid delays
    if (mem_obj && mem_obj->request) {
        struct timeval responseTime;
        if (mem_obj->request->hier.peerResponseTime(responseTime))
            served_date -= responseTime.tv_sec;
    }

    time_t exp = 0;
    if (reply->expires > 0 && reply->date > -1)
        exp = served_date + (reply->expires - reply->date);
    else
        exp = reply->expires;

    if (timestamp == served_date && expires == exp) {
        // if the reply lacks LMT, then we now know that our effective
        // LMT (i.e., timestamp) will stay the same, otherwise, old and
        // new modification times must match
        if (reply->last_modified < 0 || reply->last_modified == lastModified())
            return false; // nothing has changed
    }

    expires = exp;

    lastModified_ = reply->last_modified;

    timestamp = served_date;

    debugs(20, 5, *this << " has " << describeTimestamps());
    return true;
}
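
// Worked example (illustrative): suppose the origin's Date is 100 seconds
// ahead of squid_curtime and an intermediate cache sent "Age: 150". The
// first clamp pulls served_date back to squid_curtime, and the Age branch
// then moves it to squid_curtime - 150, preserving the invariant
// 0 <= served_date <= squid_curtime before the expiry math runs.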

bool
StoreEntry::updateOnNotModified(const StoreEntry &e304)
{
    assert(mem_obj);
    assert(e304.mem_obj);

    // update reply before calling timestampsSet() below
    const auto &oldReply = mem_obj->freshestReply();
    const auto updatedReply = oldReply.recreateOnNotModified(e304.mem_obj->baseReply());
    if (updatedReply) { // HTTP 304 brought in new information
        if (updatedReply->prefixLen() > Config.maxReplyHeaderSize) {
            throw TextException(ToSBuf("cannot update the cached response because its updated ",
                                       updatedReply->prefixLen(), "-byte header would exceed ",
                                       Config.maxReplyHeaderSize, "-byte reply_header_max_size"), Here());
        }
        mem_obj->updateReply(*updatedReply);
    }
    // else continue to use the previous update, if any

    if (!timestampsSet() && !updatedReply)
        return false;

    // Keep the old mem_obj->vary_headers; see HttpHeader::skipUpdateHeader().

    debugs(20, 5, "updated basics in " << *this << " with " << e304);
    mem_obj->appliedUpdates = true; // helps in triage; may already be true
    return true;
}

void
StoreEntry::registerAbortCallback(const AsyncCall::Pointer &handler)
{
    assert(mem_obj);
    assert(!mem_obj->abortCallback);
    mem_obj->abortCallback = handler;
}

void
StoreEntry::unregisterAbortCallback(const char *reason)
{
    assert(mem_obj);
    if (mem_obj->abortCallback) {
        mem_obj->abortCallback->cancel(reason);
        mem_obj->abortCallback = nullptr;
    }
}

void
StoreEntry::dump(int l) const
{
    debugs(20, l, "StoreEntry->key: " << getMD5Text());
    debugs(20, l, "StoreEntry->next: " << next);
    debugs(20, l, "StoreEntry->mem_obj: " << mem_obj);
    debugs(20, l, "StoreEntry->timestamp: " << timestamp);
    debugs(20, l, "StoreEntry->lastref: " << lastref);
    debugs(20, l, "StoreEntry->expires: " << expires);
    debugs(20, l, "StoreEntry->lastModified_: " << lastModified_);
    debugs(20, l, "StoreEntry->swap_file_sz: " << swap_file_sz);
    debugs(20, l, "StoreEntry->refcount: " << refcount);
    debugs(20, l, "StoreEntry->flags: " << storeEntryFlags(this));
    debugs(20, l, "StoreEntry->swap_dirn: " << swap_dirn);
    debugs(20, l, "StoreEntry->swap_filen: " << swap_filen);
    debugs(20, l, "StoreEntry->lock_count: " << lock_count);
    debugs(20, l, "StoreEntry->mem_status: " << mem_status);
    debugs(20, l, "StoreEntry->ping_status: " << ping_status);
    debugs(20, l, "StoreEntry->store_status: " << store_status);
    debugs(20, l, "StoreEntry->swap_status: " << swap_status);
}

/*
 * NOTE, this function assumes only two mem states
 */
void
StoreEntry::setMemStatus(mem_status_t new_status)
{
    if (new_status == mem_status)
        return;

    // are we using a shared memory cache?
    if (MemStore::Enabled()) {
        // This method was designed to update replacement policy, not to
        // actually purge something from the memory cache (TODO: rename?).
        // Shared memory cache does not have a policy that needs updates.
        mem_status = new_status;
        return;
    }

    assert(mem_obj != nullptr);

    if (new_status == IN_MEMORY) {
        assert(mem_obj->inmem_lo == 0);

        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not inserting special " << *this << " into policy");
        } else {
            mem_policy->Add(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "inserted " << *this << " key: " << getMD5Text());
        }

        ++hot_obj_count; // TODO: maintain for the shared hot cache as well
    } else {
        if (EBIT_TEST(flags, ENTRY_SPECIAL)) {
            debugs(20, 4, "not removing special " << *this << " from policy");
        } else {
            mem_policy->Remove(mem_policy, this, &mem_obj->repl);
            debugs(20, 4, "removed " << *this);
        }

        --hot_obj_count;
    }

    mem_status = new_status;
}

const char *
StoreEntry::url() const
{
    if (mem_obj == nullptr)
        return "[null_mem_obj]";
    else
        return mem_obj->storeId();
}

void
StoreEntry::createMemObject()
{
    assert(!mem_obj);
    mem_obj = new MemObject();
}

void
StoreEntry::createMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    assert(!mem_obj);
    ensureMemObject(aUrl, aLogUrl, aMethod);
}

void
StoreEntry::ensureMemObject(const char *aUrl, const char *aLogUrl, const HttpRequestMethod &aMethod)
{
    if (!mem_obj)
        mem_obj = new MemObject();
    mem_obj->setUris(aUrl, aLogUrl, aMethod);
}

/** disable sending content to the clients.
 *
 * This just sets DELAY_SENDING.
 */
void
StoreEntry::buffer()
{
    EBIT_SET(flags, DELAY_SENDING);
}

/** flush any buffered content.
 *
 * This just clears DELAY_SENDING and invokes the handlers
 * to begin sending anything that may be buffered.
 */
1611 | void | |
1612 | StoreEntry::flush() | |
1613 | { | |
1614 | if (EBIT_TEST(flags, DELAY_SENDING)) { | |
1615 | EBIT_CLR(flags, DELAY_SENDING); | |
1616 | invokeHandlers(); | |
1617 | } | |
1618 | } | |
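
/*
 * Typical buffer()/flush() bracketing pattern (a hedged sketch; the append()
 * call stands in for any sequence of writes to the entry):
 *
 *   e->buffer();            // set DELAY_SENDING; postpone client handlers
 *   e->append(data, len);   // queue content without waking readers
 *   e->flush();             // clear DELAY_SENDING and invoke handlers once
 */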

void
StoreEntry::reset()
{
    debugs(20, 3, url());
    mem().reset();
    expires = lastModified_ = timestamp = -1;
}

/*
 * storeFsInit
 *
 * This routine calls the SETUP routine for each fs type.
 * I don't know where the best place for this is, and I'm not going to shuffle
 * around large chunks of code right now (that can be done once it's working).
 */
void
storeFsInit(void)
{
    storeReplSetup();
}

/*
 * Called to add another store removal (replacement) policy module.
 */
void
storeReplAdd(const char *type, REMOVALPOLICYCREATE * create)
{
    int i;

    /* find the number of currently known repl types */
    for (i = 0; storerepl_list && storerepl_list[i].typestr; ++i) {
        if (strcmp(storerepl_list[i].typestr, type) == 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: Trying to load store replacement policy " << type << " twice.");
            return;
        }
    }

    /* add the new type */
    storerepl_list = static_cast<storerepl_entry_t *>(xrealloc(storerepl_list, (i + 2) * sizeof(storerepl_entry_t)));

    memset(&storerepl_list[i + 1], 0, sizeof(storerepl_entry_t));

    storerepl_list[i].typestr = type;

    storerepl_list[i].create = create;
}
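
/*
 * Registration sketch (hedged; each policy module registers its creator via
 * the generated storeReplSetup(), shown here with a hypothetical LRU creator):
 *
 *   storeReplAdd("lru", createRemovalPolicy_lru);
 */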

/*
 * Create a removal policy instance
 */
RemovalPolicy *
createRemovalPolicy(RemovalPolicySettings * settings)
{
    storerepl_entry_t *r;

    for (r = storerepl_list; r && r->typestr; ++r) {
        if (strcmp(r->typestr, settings->type) == 0)
            return r->create(settings->args);
    }

    debugs(20, DBG_IMPORTANT, "ERROR: Unknown policy " << settings->type);
    debugs(20, DBG_IMPORTANT, "ERROR: Be sure to have set cache_replacement_policy");
    debugs(20, DBG_IMPORTANT, "ERROR: and memory_replacement_policy in squid.conf!");
    fatalf("ERROR: Unknown policy %s\n", settings->type);
    return nullptr; /* NOTREACHED */
}
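
/*
 * The settings above normally come from squid.conf; a minimal illustration
 * (not a recommendation) of the two directives named in the errors above:
 *
 *   cache_replacement_policy lru
 *   memory_replacement_policy lru
 */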

void
StoreEntry::storeErrorResponse(HttpReply *reply)
{
    lock("StoreEntry::storeErrorResponse");
    buffer();
    replaceHttpReply(HttpReplyPointer(reply));
    flush();
    completeSuccessfully("replaceHttpReply() stored the entire error");
    negativeCache();
    releaseRequest(false); // if it is safe to negatively cache, sharing is OK
    unlock("StoreEntry::storeErrorResponse");
}

/*
 * Replace a store entry with a new reply. This eats (takes ownership of)
 * the reply.
 */
void
StoreEntry::replaceHttpReply(const HttpReplyPointer &rep, const bool andStartWriting)
{
    debugs(20, 3, "StoreEntry::replaceHttpReply: " << url());

    if (!mem_obj) {
        debugs(20, DBG_CRITICAL, "Attempt to replace object with no in-memory representation");
        return;
    }

    mem_obj->replaceBaseReply(rep);

    if (andStartWriting)
        startWriting();
}

void
StoreEntry::startWriting()
{
    /* TODO: when we store headers separately remove the header portion */
    /* TODO: mark the length of the headers? */
    /* We ONLY want the headers */
    assert(isEmpty());
    assert(mem_obj);

    // Per MemObject replies definitions, we can only write our base reply.
    // Currently, all callers call replaceHttpReply() first, so there is no
    // updated reply here anyway. Eventually, we may need to support the
    // updateOnNotModified()/startWriting() sequence as well.
    assert(!mem_obj->updatedReply());
    const auto rep = &mem_obj->baseReply();

    buffer();
    rep->packHeadersUsingSlowPacker(*this);
    mem_obj->markEndOfReplyHeaders();

    // Same-worker collapsing risks end with the receipt of the headers.
    // SMP collapsing risks remain until the headers are actually cached, but
    // that event is announced via CF-agnostic Store writing broadcasts.
    setCollapsingRequirement(false);

    rep->body.packInto(this);
    flush();
}

char const *
StoreEntry::getSerialisedMetaData(size_t &length) const
{
    return static_cast<const char *>(Store::PackSwapMeta(*this, length).release());
}
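
/*
 * Ownership note (an inference from the release() call above, not new API):
 * the returned buffer is heap-allocated and becomes the caller's to free.
 * A hypothetical caller:
 *
 *   size_t len = 0;
 *   const char *meta = e->getSerialisedMetaData(len);
 *   ... // write len bytes of swap metadata
 *   xfree(meta);
 */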

/**
 * If needed, signal transient entry readers that no more cache changes are
 * expected and, hence, they should switch to Plan B instead of getting stuck
 * waiting for us to start or finish storing the entry.
 */
void
StoreEntry::storeWritingCheckpoint()
{
    if (!hasTransients())
        return; // no SMP complications

    // writers become readers but only after completeWriting() which we trigger
    if (Store::Root().transientsReader(*this))
        return; // readers do not need to inform

    assert(mem_obj);
    if (mem_obj->memCache.io != Store::ioDone) {
        debugs(20, 7, "not done with mem-caching " << *this);
        return;
    }

    const auto doneWithDiskCache =
        // will not start
        (mem_obj->swapout.decision == MemObject::SwapOut::swImpossible) ||
        // or has started but finished already
        (mem_obj->swapout.decision == MemObject::SwapOut::swStarted && !swappingOut());
    if (!doneWithDiskCache) {
        debugs(20, 7, "not done with disk-caching " << *this);
        return;
    }

    debugs(20, 7, "done with writing " << *this);
    Store::Root().noteStoppedSharedWriting(*this);
}

void
StoreEntry::memOutDecision(const bool willCacheInRam)
{
    if (!willCacheInRam)
        return storeWritingCheckpoint();
    assert(mem_obj->memCache.io != Store::ioDone);
    // and wait for storeWriterDone()
}

void
StoreEntry::swapOutDecision(const MemObject::SwapOut::Decision &decision)
{
    assert(mem_obj);
    mem_obj->swapout.decision = decision;
    storeWritingCheckpoint();
}

void
StoreEntry::storeWriterDone()
{
    storeWritingCheckpoint();
}
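
/*
 * Summary (derived from the code above): storeWritingCheckpoint() runs at
 * three points: when RAM caching is declined (memOutDecision), when a
 * swap-out decision is recorded (swapOutDecision), and when a store writer
 * finishes (storeWriterDone). It notifies waiting readers only once both
 * the memory and disk caches are done writing.
 */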

void
StoreEntry::trimMemory(const bool preserveSwappable)
{
    /*
     * DPW 2007-05-09
     * Bug #1943. We must not let go of any data for IN_MEMORY
     * objects. We have to wait until the mem_status changes.
     */
    if (mem_status == IN_MEMORY)
        return;

    if (EBIT_TEST(flags, ENTRY_SPECIAL))
        return; // cannot trim because we do not load them again

    if (preserveSwappable)
        mem_obj->trimSwappable();
    else
        mem_obj->trimUnSwappable();

    debugs(88, 7, *this << " inmem_lo=" << mem_obj->inmem_lo);
}

bool
StoreEntry::modifiedSince(const time_t ims, const int imslen) const
{
    const time_t mod_time = lastModified();

    debugs(88, 3, "modifiedSince: '" << url() << "'");

    debugs(88, 3, "modifiedSince: mod_time = " << mod_time);

    if (mod_time < 0)
        return true;

    assert(imslen < 0); // TODO: Either remove imslen or support it properly.

    if (mod_time > ims) {
        debugs(88, 3, "--> YES: entry newer than client");
        return true;
    } else if (mod_time < ims) {
        debugs(88, 3, "--> NO: entry older than client");
        return false;
    } else {
        debugs(88, 3, "--> NO: same LMT");
        return false;
    }
}
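
/*
 * Decision table for modifiedSince() (restating the code above):
 *
 *   mod_time unknown (< 0)   -> modified (play it safe, return true)
 *   mod_time >  IMS value    -> modified
 *   mod_time <= IMS value    -> not modified
 */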

bool
StoreEntry::hasEtag(ETag &etag) const
{
    if (const auto reply = hasFreshestReply()) {
        etag = reply->header.getETag(Http::HdrType::ETAG);
        if (etag.str)
            return true;
    }
    return false;
}

bool
StoreEntry::hasIfMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_MATCH);
    return hasOneOfEtags(reqETags, false);
}

bool
StoreEntry::hasIfNoneMatchEtag(const HttpRequest &request) const
{
    const String reqETags = request.header.getList(Http::HdrType::IF_NONE_MATCH);
    // weak comparison is allowed only for HEAD or full-body GET requests
    const bool allowWeakMatch = !request.flags.isRanged &&
                                (request.method == Http::METHOD_GET || request.method == Http::METHOD_HEAD);
    return hasOneOfEtags(reqETags, allowWeakMatch);
}

/// whether at least one of the request ETags matches the entity ETag
bool
StoreEntry::hasOneOfEtags(const String &reqETags, const bool allowWeakMatch) const
{
    const auto repETag = mem().freshestReply().header.getETag(Http::HdrType::ETAG);
    if (!repETag.str) {
        static SBuf asterisk("*", 1);
        return strListIsMember(&reqETags, asterisk, ',');
    }

    bool matched = false;
    const char *pos = nullptr;
    const char *item;
    int ilen;
    while (!matched && strListGetItem(&reqETags, ',', &item, &ilen, &pos)) {
        if (!strncmp(item, "*", ilen))
            matched = true;
        else {
            String str;
            str.append(item, ilen);
            ETag reqETag;
            if (etagParseInit(&reqETag, str.termedBuf())) {
                matched = allowWeakMatch ? etagIsWeakEqual(repETag, reqETag) :
                          etagIsStrongEqual(repETag, reqETag);
            }
        }
    }
    return matched;
}
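
/*
 * Matching examples (restating the loop above; assume the cached reply
 * carries a strong ETag: "abc"):
 *
 *   If-None-Match: "abc"    -> matches (strong comparison)
 *   If-None-Match: W/"abc"  -> matches only when allowWeakMatch is true
 *   If-None-Match: "xyz"    -> no match
 *   If-None-Match: *        -> matches
 */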

Store::Disk &
StoreEntry::disk() const
{
    assert(hasDisk());
    const RefCount<Store::Disk> &sd = INDEXSD(swap_dirn);
    assert(sd);
    return *sd;
}

bool
StoreEntry::hasDisk(const sdirno dirn, const sfileno filen) const
{
    checkDisk();
    if (dirn < 0 && filen < 0)
        return swap_dirn >= 0;
    Must(dirn >= 0);
    const bool matchingDisk = (swap_dirn == dirn);
    return filen < 0 ? matchingDisk : (matchingDisk && swap_filen == filen);
}
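
/*
 * Call patterns (restating the branches above, assuming the usual negative
 * defaults for both parameters in the Store.h declaration):
 *
 *   e->hasDisk()            // is the entry on any cache_dir?
 *   e->hasDisk(dirn)        // is it on this particular cache_dir?
 *   e->hasDisk(dirn, filen) // is it in this exact disk slot?
 */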

void
StoreEntry::attachToDisk(const sdirno dirn, const sfileno fno, const swap_status_t status)
{
    debugs(88, 3, "attaching entry with key " << getMD5Text() << " : " <<
           swapStatusStr[status] << " " << dirn << " " <<
           asHex(fno).upperCase().minDigits(8));
    checkDisk();
    swap_dirn = dirn;
    swap_filen = fno;
    swap_status = status;
    checkDisk();
}

void
StoreEntry::detachFromDisk()
{
    swap_dirn = -1;
    swap_filen = -1;
    swap_status = SWAPOUT_NONE;
}

void
StoreEntry::checkDisk() const
{
    try {
        if (swap_dirn < 0) {
            Must(swap_filen < 0);
            Must(swap_status == SWAPOUT_NONE);
        } else {
            Must(swap_filen >= 0);
            Must(static_cast<size_t>(swap_dirn) < Config.cacheSwap.n_configured);
            if (swapoutFailed()) {
                Must(EBIT_TEST(flags, RELEASE_REQUEST));
            } else {
                Must(swappingOut() || swappedOut());
            }
        }
    } catch (...) {
        debugs(88, DBG_IMPORTANT, "ERROR: inconsistent disk entry state " <<
               *this << "; problem: " << CurrentException);
        throw;
    }
}

/*
 * Return true if the entry is in a state where
 * it can accept more data (i.e., via the write() method).
 */
bool
StoreEntry::isAccepting() const
{
    if (STORE_PENDING != store_status)
        return false;

    if (EBIT_TEST(flags, ENTRY_ABORTED))
        return false;

    return true;
}

const char *
StoreEntry::describeTimestamps() const
{
    LOCAL_ARRAY(char, buf, 256);
    snprintf(buf, 256, "LV:%-9d LU:%-9d LM:%-9d EX:%-9d",
             static_cast<int>(timestamp),
             static_cast<int>(lastref),
             static_cast<int>(lastModified_),
             static_cast<int>(expires));
    return buf;
}
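
/*
 * Legend for the string above (mapping each tag to the field printed):
 *   LV: timestamp, LU: lastref, LM: lastModified_, EX: expires
 * (all time_t values, printed as integer seconds).
 */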

void
StoreEntry::setCollapsingRequirement(const bool required)
{
    if (hittingRequiresCollapsing() == required)
        return; // no change

    debugs(20, 5, (required ? "adding to " : "removing from ") << *this);
    if (required)
        EBIT_SET(flags, ENTRY_REQUIRES_COLLAPSING);
    else
        EBIT_CLR(flags, ENTRY_REQUIRES_COLLAPSING);
}

static std::ostream &
operator <<(std::ostream &os, const Store::IoStatus &io)
{
    switch (io) {
    case Store::ioUndecided:
        os << 'u';
        break;
    case Store::ioReading:
        os << 'r';
        break;
    case Store::ioWriting:
        os << 'w';
        break;
    case Store::ioDone:
        os << 'o';
        break;
    }
    return os;
}

std::ostream &operator <<(std::ostream &os, const StoreEntry &e)
{
    os << "e:";

    if (e.hasTransients()) {
        const auto &xitTable = e.mem_obj->xitTable;
        os << 't' << xitTable.io << xitTable.index;
    }

    if (e.hasMemStore()) {
        const auto &memCache = e.mem_obj->memCache;
        os << 'm' << memCache.io << memCache.index << '@' << memCache.offset;
    }

    // Do not use e.hasDisk() here because its checkDisk() call may call us.
    if (e.swap_filen > -1 || e.swap_dirn > -1)
        os << 'd' << e.swap_filen << '@' << e.swap_dirn;

    os << '=';

    // print only non-default status values, using unique letters
    if (e.mem_status != NOT_IN_MEMORY ||
            e.store_status != STORE_PENDING ||
            e.swap_status != SWAPOUT_NONE ||
            e.ping_status != PING_NONE) {
        if (e.mem_status != NOT_IN_MEMORY) os << 'm';
        if (e.store_status != STORE_PENDING) os << 's';
        if (e.swap_status != SWAPOUT_NONE) os << 'w' << e.swap_status;
        if (e.ping_status != PING_NONE) os << 'p' << e.ping_status;
    }

    // print only set flags, using unique letters
    if (e.flags) {
        if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) os << 'S';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_ALWAYS)) os << 'R';
        if (EBIT_TEST(e.flags, DELAY_SENDING)) os << 'P';
        if (EBIT_TEST(e.flags, RELEASE_REQUEST)) os << 'X';
        if (EBIT_TEST(e.flags, REFRESH_REQUEST)) os << 'F';
        if (EBIT_TEST(e.flags, ENTRY_REVALIDATE_STALE)) os << 'E';
        if (EBIT_TEST(e.flags, KEY_PRIVATE)) {
            os << 'I';
            if (e.shareableWhenPrivate)
                os << 'H';
        }
        if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) os << 'W';
        if (EBIT_TEST(e.flags, ENTRY_NEGCACHED)) os << 'N';
        if (EBIT_TEST(e.flags, ENTRY_VALIDATED)) os << 'V';
        if (EBIT_TEST(e.flags, ENTRY_BAD_LENGTH)) os << 'L';
        if (EBIT_TEST(e.flags, ENTRY_ABORTED)) os << 'A';
        if (EBIT_TEST(e.flags, ENTRY_REQUIRES_COLLAPSING)) os << 'C';
    }

    return os << '/' << &e << '*' << e.locks();
}
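
/*
 * Overall debug format produced above (a reading aid, not new output):
 *
 *   e:[t<io><index>][m<io><index>@<offset>][d<filen>@<dirn>]=<status><flags>/<address>*<lock count>
 *
 * where each <io> is one of u/r/w/o as printed by the IoStatus operator.
 */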

void
Store::EntryGuard::onException() noexcept
{
    SWALLOW_EXCEPTIONS({
        entry_->releaseRequest(false);
        entry_->unlock(context_);
    });
}