/*
 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20 Store Controller */

#include "squid.h"
#include "mem_node.h"
#include "MemStore.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "store/Controller.h"
#include "store/Disks.h"
#include "store/forward.h"
#include "store/LocalSearch.h"
#include "tools.h"
#include "Transients.h"

#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -k parse fails (because it
 * calls fatal()).
 */
int Store::Controller::store_dirs_rebuilding = 1;

Store::Controller::Controller() :
    disks(new Disks),
    sharedMemStore(nullptr),
    localMemStore(false),
    transients(nullptr)
{
    assert(!store_table);
}

/// this destructor is never called because the Controller singleton is immortal
Store::Controller::~Controller()
{
    // assert at runtime because we cannot `= delete` an overridden destructor
    assert(!"Controller is never destroyed");
}

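/// initializes the memory cache (shared or local), cache_dirs, and, where
/// applicable, the shared transients table for this process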
void
Store::Controller::init()
{
    if (IamWorkerProcess()) {
        if (MemStore::Enabled()) {
            sharedMemStore = new MemStore;
            sharedMemStore->init();
        } else if (Config.memMaxSize > 0) {
            localMemStore = true;
        }
    }

    disks->init();

    if (Transients::Enabled() && IamWorkerProcess()) {
        transients = new Transients;
        transients->init();
    }
}
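
// The logic above leaves each worker in one of three memory-caching modes:
//   shared memory cache enabled -> sharedMemStore is set, localMemStore stays false
//   cache_mem > 0 without sharing -> only localMemStore becomes true
//   cache_mem == 0 -> neither is set; memory caching is disabled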

void
Store::Controller::create()
{
    disks->create();

#if !(_SQUID_WINDOWS_ || _SQUID_MINGW_)
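    // Reap any exited children, presumably forked by disks->create() above;
    // WNOHANG keeps this loop from blocking on still-running ones.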
    pid_t pid;
    do {
        PidStatus status;
        pid = WaitForAnyPid(status, WNOHANG);
    } while (pid > 0 || (pid < 0 && errno == EINTR));
#endif
}

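/// periodic store upkeep: gives cache_dirs a chance to purge entries beyond
/// their limits and warns (at most once per 10 seconds) if the total swap
/// size still exceeds the configured maximum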
void
Store::Controller::maintain()
{
    static time_t last_warn_time = 0;

    disks->maintain();

    /* this should be emitted by the oversize dir, not globally */

    if (currentSize() > maxSize()) {
        if (squid_curtime - last_warn_time > 10) {
            debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
                   << currentSize() / 1024.0 << " KB > "
                   << maxSize() / 1024.0 << " KB");
            last_warn_time = squid_curtime;
        }
    }
}

void
Store::Controller::getStats(StoreInfoStats &stats) const
{
    if (sharedMemStore)
        sharedMemStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        if (localMemStore) {
            // XXX: also count internal/in-transit objects
            stats.mem.count = hot_obj_count;
        } else {
            // XXX: count internal/in-transit objects instead
            stats.mem.count = hot_obj_count;
        }
    }

    disks->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}

void
Store::Controller::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (sharedMemStore)
        sharedMemStore->stat(output);

    disks->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
Store::Controller::maxSize() const
{
    /* TODO: include memory cache? */
    return disks->maxSize();
}

uint64_t
Store::Controller::minSize() const
{
    /* TODO: include memory cache? */
    return disks->minSize();
}

uint64_t
Store::Controller::currentSize() const
{
    /* TODO: include memory cache? */
    return disks->currentSize();
}

uint64_t
Store::Controller::currentCount() const
{
    /* TODO: include memory cache? */
    return disks->currentCount();
}

int64_t
Store::Controller::maxObjectSize() const
{
    /* TODO: include memory cache? */
    return disks->maxObjectSize();
}

void
Store::Controller::configure()
{
    disks->configure();

    store_swap_high = (long) (((float) maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    store_pages_max = Config.memMaxSize / sizeof(mem_node);

    // TODO: move this into a memory cache class when we have one
    const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
    const int64_t disksMax = disks->maxObjectSize();
    store_maxobjsize = std::max(disksMax, memMax);
}
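
// Worked example with hypothetical settings: maximum_object_size_in_memory of
// 512 KB, cache_mem of 256 MB, and a largest cache_dir maximum_object_size of
// 4 MB give memMax = 512 KB and disksMax = 4 MB, so store_maxobjsize becomes
// 4 MB: an object too big for the memory cache may still be cacheable on disk.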

StoreSearch *
Store::Controller::search()
{
    // this is the only kind of search we currently support
    return NewLocalSearch();
}

void
Store::Controller::sync()
{
    if (sharedMemStore)
        sharedMemStore->sync();
    disks->sync();
}

/*
 * handle callbacks for all available filesystems
 */
int
Store::Controller::callback()
{
    /* mem cache callbacks? */
    return disks->callback();
}

/// update reference counters of the recently touched entry
void
Store::Controller::referenceBusy(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.hasDisk())
        disks->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        sharedMemStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

/// dereference()s an idle entry
/// \returns false if and only if the entry should be deleted
bool
Store::Controller::dereferenceIdle(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    // idle private entries cannot be reused
    if (EBIT_TEST(e.flags, KEY_PRIVATE))
        return false;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    // Notify the fs that we are not referencing this object any more. This
    // should be done even if we overwrite keepInStoreTable afterwards.

    if (e.hasDisk())
        keepInStoreTable = disks->dereference(e) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = sharedMemStore->dereference(e) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (localMemStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    if (e.hittingRequiresCollapsing()) {
        // If we were writing this now-locally-idle entry, then we did not
        // finish and should now destroy an incomplete entry. Otherwise, do not
        // leave this idle StoreEntry behind because handleIMSReply() lacks
        // freshness checks when hitting a collapsed revalidation entry.
        keepInStoreTable = false; // may overrule fs decisions made above
    }

    return keepInStoreTable;
}

bool
Store::Controller::markedForDeletion(const cache_key *key) const
{
    // assuming a public key, checking Transients should cover all cases.
    return transients && transients->markedForDeletion(key);
}

bool
Store::Controller::markedForDeletionAndAbandoned(const StoreEntry &e) const
{
    // The opposite check order could miss a reader that has arrived after the
    // !readers() and before the markedForDeletion() check.
    return markedForDeletion(reinterpret_cast<const cache_key*>(e.key)) &&
           transients && !transients->readers(e);
}

bool
Store::Controller::hasReadableDiskEntry(const StoreEntry &e) const
{
    return disks->hasReadableEntry(e);
}

/// flags problematic entries before find() commits to finalizing/returning them
void
Store::Controller::checkFoundCandidate(const StoreEntry &entry) const
{
    checkTransients(entry);

    // The "hittingRequiresCollapsing() has an active writer" checks below
    // protect callers from getting stuck and/or from using a stale revalidation
    // reply. However, these protections are not reliable because the writer may
    // disappear at any time and/or without a trace. Collapsing adds risks...
    if (entry.hittingRequiresCollapsing()) {
        if (entry.hasTransients()) {
            // Too late to check here because the writer may be gone by now, but
            // Transients do check when they setCollapsingRequirement().
        } else {
            // a local writer must hold a lock on its writable entry
            if (!(entry.locked() && entry.isAccepting()))
                throw TextException("no local writer", Here());
        }
    }
}

StoreEntry *
Store::Controller::find(const cache_key *key)
{
    if (const auto entry = peek(key)) {
        try {
            if (!entry->key)
                allowSharing(*entry, key);
            checkFoundCandidate(*entry);
            entry->touch();
            referenceBusy(*entry);
            return entry;
        } catch (const std::exception &ex) {
            debugs(20, 2, "failed with " << *entry << ": " << ex.what());
            entry->release();
            // fall through
        }
    }
    return nullptr;
}

/// indexes and adds SMP-tracking for an ephemeral peek() result
void
Store::Controller::allowSharing(StoreEntry &entry, const cache_key *key)
{
    // anchorToCache() below and many find() callers expect a registered entry
    addReading(&entry, key);

    if (entry.hasTransients()) {
        // store hadWriter before computing `found`; \see Transients::get()
        const auto hadWriter = transients->hasWriter(entry);
        const auto found = anchorToCache(entry);
        if (!found) {
            // !found should imply hittingRequiresCollapsing() regardless of writer presence
            if (!entry.hittingRequiresCollapsing()) {
                debugs(20, DBG_IMPORTANT, "ERROR: Squid BUG: missing ENTRY_REQUIRES_COLLAPSING for " << entry);
                throw TextException("transients entry missing ENTRY_REQUIRES_COLLAPSING", Here());
            }

            if (!hadWriter) {
                // prevent others from falling into the same trap
                throw TextException("unattached transients entry missing writer", Here());
            }
        }
    }
}

StoreEntry *
Store::Controller::findCallbackXXX(const cache_key *key)
{
    // We could check for mem_obj presence (and more), moving and merging some
    // of the duplicated neighborsUdpAck() and neighborsHtcpReply() code here,
    // but that would mean polluting Store with HTCP/ICP code. Instead, we
    // should encapsulate callback-related data in a protocol-neutral MemObject
    // member or use an HTCP/ICP-specific index rather than store_table.

    // cannot reuse peekAtLocal() because HTCP/ICP callbacks may use private keys
    return static_cast<StoreEntry*>(hash_lookup(store_table, key));
}

/// \returns either an existing local reusable StoreEntry object or nil
/// To treat remotely marked entries specially,
/// callers ought to check markedForDeletion() first!
StoreEntry *
Store::Controller::peekAtLocal(const cache_key *key)
{
    if (StoreEntry *e = static_cast<StoreEntry*>(hash_lookup(store_table, key))) {
        // callers must only search for public entries
        assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
        assert(e->publicKey());
        checkTransients(*e);

        // TODO: ignore and maybe handleIdleEntry() unlocked in-transit entries
        // because their backing store slot may be gone already.
        return e;
    }
    return nullptr;
}

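/// \returns a matching entry from any store layer (local store_table, shared
/// transients, shared memory cache, or disk) or nil, without updating
/// reference counts or making an ephemeral result shareable; \see find()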
StoreEntry *
Store::Controller::peek(const cache_key *key)
{
    debugs(20, 3, storeKeyText(key));

    if (markedForDeletion(key)) {
        debugs(20, 3, "ignoring marked in-transit " << storeKeyText(key));
        return nullptr;
    }

    if (StoreEntry *e = peekAtLocal(key)) {
        debugs(20, 3, "got local in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            return e;
        }
    }

    if (sharedMemStore) {
        if (StoreEntry *e = sharedMemStore->get(key)) {
            debugs(20, 3, "got mem-cached entry: " << *e);
            return e;
        }
    }

    if (const auto e = disks->get(key)) {
        debugs(20, 3, "got disk-cached entry: " << *e);
        return e;
    }

    debugs(20, 4, "cannot locate " << storeKeyText(key));
    return nullptr;
}

bool
Store::Controller::transientsReader(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isReader(e);
}

bool
Store::Controller::transientsWriter(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isWriter(e);
}

int64_t
Store::Controller::accumulateMore(StoreEntry &entry) const
{
    return disks->accumulateMore(entry);
    // The memory cache should not influence the for-swapout accumulation decision.
}

// Must be called from StoreEntry::release() or releaseRequest() because
// those methods currently manage local indexing of StoreEntry objects.
// TODO: Replace StoreEntry::release*() with Root().evictCached().
void
Store::Controller::evictCached(StoreEntry &e)
{
    debugs(20, 7, e);
    if (transients)
        transients->evictCached(e);
    memoryEvictCached(e);
    disks->evictCached(e);
}

void
Store::Controller::evictIfFound(const cache_key *key)
{
    debugs(20, 7, storeKeyText(key));

    if (StoreEntry *entry = peekAtLocal(key)) {
        debugs(20, 5, "marking local in-transit " << *entry);
        entry->release(true);
        return;
    }

    if (sharedMemStore)
        sharedMemStore->evictIfFound(key);

    disks->evictIfFound(key);

    if (transients)
        transients->evictIfFound(key);
}


/// whether the memory cache is allowed to store that many additional pages
bool
Store::Controller::memoryCacheHasSpaceFor(const int pagesRequired) const
{
    // XXX: We count mem_nodes but may free shared memory pages instead.
    const auto fits = mem_node::InUseCount() + pagesRequired <= store_pages_max;
    debugs(20, 7, fits << ": " << mem_node::InUseCount() << '+' << pagesRequired << '?' << store_pages_max);
    return fits;
}

void
Store::Controller::freeMemorySpace(const int bytesRequired)
{
    const auto pagesRequired = (bytesRequired + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
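    // ceiling division: a final partial page still occupies a whole page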

    if (memoryCacheHasSpaceFor(pagesRequired))
        return;

    // XXX: When store_pages_max is smaller than pagesRequired, we should not
    // look for more space (but we do because we want to abandon idle entries?).

    // limit our performance impact to one walk per second
    static time_t lastWalk = 0;
    if (lastWalk == squid_curtime)
        return;
    lastWalk = squid_curtime;

    debugs(20, 2, "need " << pagesRequired << " pages");

    // let abandon()/handleIdleEntry() know about the impending memory shortage
    memoryPagesDebt_ = pagesRequired;

    // XXX: SMP-unaware: Walkers should iterate memory cache, not store_table.
    // XXX: Limit iterations by time, not arbitrary count.
    const auto walker = mem_policy->PurgeInit(mem_policy, 100000);
    int removed = 0;
    while (const auto entry = walker->Next(walker)) {
        // Abandoned memory cache entries are purged during memory shortage.
        entry->abandon(__func__); // may delete entry
        ++removed;

        if (memoryCacheHasSpaceFor(pagesRequired))
            break;
    }
    // TODO: Move to RemovalPolicyWalker::Done() that has more/better details.
    debugs(20, 3, "removed " << removed << " out of " << hot_obj_count << " memory-cached entries");
    walker->Done(walker);
    memoryPagesDebt_ = 0;
}

// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
Store::Controller::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // do the current and expected sizes obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}
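
// Worked example with hypothetical settings: with cache_mem of 256 MB and
// maximum_object_size_in_memory of 512 KB, ramLimit is 512 KB. An entry that
// has loaded only 100 KB so far but expects a 2 MB reply gets ramSize = 2 MB
// and is rejected; an entry with an unknown reply size (expectedSize < 0) is
// judged by its loaded bytes alone.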

void
Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (sharedMemStore)
        sharedMemStore->write(e); // leave keepInLocalMemory false
    else if (localMemStore)
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

/// removes the entry from the memory cache
/// XXX: Dangerous side effect: Unlocked entries lose their mem_obj.
void
Store::Controller::memoryEvictCached(StoreEntry &e)
{
    // TODO: Untangle memory caching from mem_obj.
    if (sharedMemStore)
        sharedMemStore->evictCached(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        if (!e.locked())
            e.destroyMemObject();
}

void
Store::Controller::memoryDisconnect(StoreEntry &e)
{
    if (sharedMemStore)
        sharedMemStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}

void
Store::Controller::noteStoppedSharedWriting(StoreEntry &e)
{
    if (transients && e.hasTransients()) // paranoid: the caller should check
        transients->completeWriting(e);
}

int
Store::Controller::transientReaders(const StoreEntry &e) const
{
    return (transients && e.hasTransients()) ?
           transients->readers(e) : 0;
}

void
Store::Controller::transientsDisconnect(StoreEntry &e)
{
    if (transients)
        transients->disconnect(e);
}

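/// called when the last local reference to the entry is gone; decides whether
/// the idle entry stays memory-cached, stays indexed for its on-disk data, or
/// gets destroyed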
void
Store::Controller::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (sharedMemStore) {
        // leave keepInLocalMemory false; sharedMemStore maintains its own cache
    } else if (localMemStore) {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            memoryCacheHasSpaceFor(memoryPagesDebt_);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index should not stay in the global store_table.
    if (!dereferenceIdle(e, keepInLocalMemory)) {
        debugs(20, 5, "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, "keepInLocalMemory: " << keepInLocalMemory);

    // formerly known as "WARNING: found KEY_PRIVATE"
    assert(!EBIT_TEST(e.flags, KEY_PRIVATE));

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
        return;
    }

    // We know the in-memory data will be gone. Get rid of the entire entry if
    // it has nothing worth preserving on disk either.
    if (!e.swappedOut()) {
        e.release(); // deletes e
        return;
    }

    memoryEvictCached(e); // may already be gone
    // and keep the entry in store_table for its on-disk data
}

bool
Store::Controller::updateOnNotModified(StoreEntry *old, StoreEntry &e304)
{
    Must(old);
    Must(old->mem_obj);
    Must(e304.mem_obj);

    // updateOnNotModified() may be called many times for the same old entry.
    // e304.mem_obj->appliedUpdates value distinguishes two cases:
    //   false: Independent store clients revalidating the same old StoreEntry.
    //          Each such update uses its own e304. The old StoreEntry
    //          accumulates such independent updates.
    //   true: Store clients feeding off the same 304 response. Each such update
    //         uses the same e304. For timestamp correctness and performance's
    //         sake, it is best to detect and skip such repeated update calls.
    if (e304.mem_obj->appliedUpdates) {
        debugs(20, 5, "ignored repeated update of " << *old << " with " << e304);
        return true;
    }
    e304.mem_obj->appliedUpdates = true;

    try {
        if (!old->updateOnNotModified(e304)) {
            debugs(20, 5, "updated nothing in " << *old << " with " << e304);
            return true;
        }
    } catch (...) {
        debugs(20, DBG_IMPORTANT, "ERROR: Failed to update a cached response: " << CurrentException);
        return false;
    }

    if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
        sharedMemStore->updateHeaders(old);

    if (old->swap_dirn > -1)
        disks->updateHeaders(old);

    return true;
}

bool
Store::Controller::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                   const HttpRequestMethod &)
{
    const KeyScope keyScope = reqFlags.refresh ? ksRevalidation : ksDefault;
    // set the flag now so that it gets copied into the Transients entry
    e->setCollapsingRequirement(true);
    if (e->makePublic(keyScope)) { // this is needed for both local and SMP collapsing
        debugs(20, 3, "may " << (transients && e->hasTransients() ?
                                 "SMP-" : "locally-") << "collapse " << *e);
        assert(e->hittingRequiresCollapsing());
        return true;
    }
    // paranoid cleanup; the flag is meaningless for private entries
    e->setCollapsingRequirement(false);
    return false;
}

void
Store::Controller::addReading(StoreEntry *e, const cache_key *key)
{
    if (transients)
        transients->monitorIo(e, key, Store::ioReading);
    e->hashInsert(key);
}

void
Store::Controller::addWriting(StoreEntry *e, const cache_key *key)
{
    assert(e);
    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return; // constant memory-resident entries do not need transients

    if (transients)
        transients->monitorIo(e, key, Store::ioWriting);
    // else: non-SMP configurations do not need transients
}

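/// syncs a locally-collapsed reader entry with the writer's progress recorded
/// in shared caches, anchoring, waking, or aborting the entry as appropriate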
void
Store::Controller::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }

    if (!collapsed->locked()) {
        debugs(20, 3, "skipping (and may destroy) unlocked " << *collapsed);
        handleIdleEntry(*collapsed);
        return;
    }

    assert(collapsed->mem_obj);

    if (EBIT_TEST(collapsed->flags, ENTRY_ABORTED)) {
        debugs(20, 3, "skipping already aborted " << *collapsed);
        return;
    }

    debugs(20, 7, "syncing " << *collapsed);

    Transients::EntryStatus entryStatus;
    transients->status(*collapsed, entryStatus);

    if (entryStatus.waitingToBeFreed) {
        debugs(20, 3, "will release " << *collapsed << " due to waitingToBeFreed");
        collapsed->release(true); // may already be marked
    }

    if (transients->isWriter(*collapsed))
        return; // readers can only change our waitingToBeFreed flag

    assert(transients->isReader(*collapsed));

    bool found = false;
    bool inSync = false;
    if (sharedMemStore && collapsed->mem_obj->memCache.io == Store::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "already handled by memory store: " << *collapsed);
    } else if (sharedMemStore && collapsed->hasMemStore()) {
        found = true;
        inSync = sharedMemStore->updateAnchored(*collapsed);
        // TODO: handle entries attached to both memory and disk
    } else if (collapsed->hasDisk()) {
        found = true;
        inSync = disks->updateAnchored(*collapsed);
    } else {
        try {
            found = anchorToCache(*collapsed);
            inSync = found;
        } catch (...) {
            // TODO: Write an exception handler for the entire method.
            debugs(20, 3, "anchorToCache() failed for " << *collapsed << ": " << CurrentException);
            collapsed->abort();
            return;
        }
    }

    if (entryStatus.waitingToBeFreed && !found) {
        debugs(20, 3, "aborting unattached " << *collapsed <<
               " because it was marked for deletion before we could attach it");
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        assert(found);
        collapsed->setCollapsingRequirement(false);
        collapsed->invokeHandlers();
        return;
    }

    if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
        return;
    }

    if (!entryStatus.hasWriter) {
        debugs(20, 3, "aborting abandoned-by-writer " << *collapsed);
        collapsed->abort();
        return;
    }

    // the entry is still not in one of the caches
    debugs(20, 7, "waiting " << *collapsed);
    collapsed->setCollapsingRequirement(true);
}

/// If possible and not already done, associates the entry with its store(s).
/// \returns false for not-yet-cached entries that we may attach later
/// \returns true for other entries after synchronizing them with their store
bool
Store::Controller::anchorToCache(StoreEntry &entry)
{
    assert(entry.hasTransients());
    assert(transientsReader(entry));

    // TODO: Attach entries to both memory and disk

    // TODO: Reduce code duplication with syncCollapsed()
    if (sharedMemStore && entry.mem().memCache.io == Store::ioDone) {
        debugs(20, 5, "already handled by memory store: " << entry);
        return true;
    } else if (sharedMemStore && entry.hasMemStore()) {
        debugs(20, 5, "already anchored to memory store: " << entry);
        return true;
    } else if (entry.hasDisk()) {
        debugs(20, 5, "already anchored to disk: " << entry);
        return true;
    }

    debugs(20, 7, "anchoring " << entry);

    Transients::EntryStatus entryStatus;
    transients->status(entry, entryStatus);

    bool found = false;
    if (sharedMemStore)
        found = sharedMemStore->anchorToCache(entry);
    if (!found)
        found = disks->anchorToCache(entry);

    if (found) {
        debugs(20, 7, "anchored " << entry);
        entry.setCollapsingRequirement(false);
        return true;
    }

    if (entryStatus.waitingToBeFreed)
        throw TextException("will never be able to anchor to an already marked entry", Here());

    if (!entryStatus.hasWriter)
        throw TextException("will never be able to anchor to an abandoned-by-writer entry", Here());

    debugs(20, 7, "skipping not yet cached " << entry);
    entry.setCollapsingRequirement(true);
    return false;
}

bool
Store::Controller::SmpAware()
{
    return MemStore::Enabled() || Disks::SmpAware();
}

void
Store::Controller::checkTransients(const StoreEntry &e) const
{
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;
    assert(!transients || e.hasTransients());
}

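/// The Controller singleton: created on first use and deliberately never
/// destroyed, which is why the assertion in ~Controller() is unreachable.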
Store::Controller&
Store::Root()
{
    static const auto root = new Controller();
    return *root;
}