/*
 * Copyright (C) 1996-2025 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Store Controller */

#include "squid.h"
#include "mem_node.h"
#include "MemStore.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "store/Controller.h"
#include "store/Disks.h"
#include "store/forward.h"
#include "store/LocalSearch.h"
#include "tools.h"
#include "Transients.h"

#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int Store::Controller::store_dirs_rebuilding = 1;
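// Presumably, each cache_dir rebuild decrements this counter as it finishes
// (via storeRebuildComplete()), allowing clean log writes once it drops to zero.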

Store::Controller::Controller() :
    disks(new Disks),
    sharedMemStore(nullptr),
    localMemStore(false),
    transients(nullptr)
{
    assert(!store_table);
}

/// this destructor is never called because Controller singleton is immortal
Store::Controller::~Controller()
{
    // assert at runtime because we cannot `= delete` an overridden destructor
    assert(!"Controller is never destroyed");
}

void
Store::Controller::init()
{
    if (IamWorkerProcess()) {
        if (MemStore::Enabled()) {
            sharedMemStore = new MemStore;
            sharedMemStore->init();
        } else if (Config.memMaxSize > 0) {
            localMemStore = true;
        }
    }

    disks->init();

    if (Transients::Enabled() && IamWorkerProcess()) {
        transients = new Transients;
        transients->init();
    }
}

void
Store::Controller::create()
{
    disks->create();

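    // Presumably, the loop below reaps any child processes spawned while
    // creating cache_dir structures, so none linger as zombies; WNOHANG
    // keeps the wait non-blocking.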
#if !(_SQUID_WINDOWS_ || _SQUID_MINGW_)
    pid_t pid;
    do {
        PidStatus status;
        pid = WaitForAnyPid(status, WNOHANG);
    } while (pid > 0 || (pid < 0 && errno == EINTR));
#endif
}

void
Store::Controller::maintain()
{
    static time_t last_warn_time = 0;

    disks->maintain();

    /* this should be emitted by the oversize dir, not globally */

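    // Note: currentSize()/1024.0 and maxSize()>>10 below both express bytes
    // as KB; the 10-second check keeps this critical warning from flooding
    // cache.log while the cache stays over its limit.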
    if (Root().currentSize() > Store::Root().maxSize()) {
        if (squid_curtime - last_warn_time > 10) {
            debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
                   << Store::Root().currentSize() / 1024.0 << " KB > "
                   << (Store::Root().maxSize() >> 10) << " KB");
            last_warn_time = squid_curtime;
        }
    }
}

void
Store::Controller::getStats(StoreInfoStats &stats) const
{
    if (sharedMemStore)
        sharedMemStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        if (localMemStore) {
            // XXX: also count internal/in-transit objects
            stats.mem.count = hot_obj_count;
        } else {
            // XXX: count internal/in-transit objects instead
            stats.mem.count = hot_obj_count;
        }
    }

    disks->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}

void
Store::Controller::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (sharedMemStore)
        sharedMemStore->stat(output);

    disks->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
Store::Controller::maxSize() const
{
    /* TODO: include memory cache ? */
    return disks->maxSize();
}

uint64_t
Store::Controller::minSize() const
{
    /* TODO: include memory cache ? */
    return disks->minSize();
}

uint64_t
Store::Controller::currentSize() const
{
    /* TODO: include memory cache ? */
    return disks->currentSize();
}

uint64_t
Store::Controller::currentCount() const
{
    /* TODO: include memory cache ? */
    return disks->currentCount();
}

int64_t
Store::Controller::maxObjectSize() const
{
    /* TODO: include memory cache ? */
    return disks->maxObjectSize();
}

void
Store::Controller::configure()
{
    disks->configure();

    store_swap_high = (long) (((float) maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    store_pages_max = Config.memMaxSize / sizeof(mem_node);
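    // Worked example (illustration only): with cache_swap_high 95 and
    // cache_swap_low 90, the watermarks come out to 95% and 90% of maxSize().
    // store_pages_max is how many mem_node objects (page-sized data chunks
    // plus bookkeeping) fit into cache_mem (Config.memMaxSize).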

    // TODO: move this into a memory cache class when we have one
    const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
    const int64_t disksMax = disks->maxObjectSize();
    store_maxobjsize = std::max(disksMax, memMax);
}

StoreSearch *
Store::Controller::search()
{
    // this is the only kind of search we currently support
    return NewLocalSearch();
}

void
Store::Controller::sync(void)
{
    if (sharedMemStore)
        sharedMemStore->sync();
    disks->sync();
}

/*
 * handle callbacks for all available fs'es
 */
int
Store::Controller::callback()
{
    /* mem cache callbacks ? */
    return disks->callback();
}

/// update reference counters of the recently touched entry
void
Store::Controller::referenceBusy(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.hasDisk())
        disks->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        sharedMemStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

/// dereference()s an idle entry
/// \returns false if and only if the entry should be deleted
bool
Store::Controller::dereferenceIdle(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    // idle private entries cannot be reused
    if (EBIT_TEST(e.flags, KEY_PRIVATE))
        return false;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    // Notify the fs that we are not referencing this object any more. This
    // should be done even if we overwrite keepInStoreTable afterwards.

    if (e.hasDisk())
        keepInStoreTable = disks->dereference(e) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = sharedMemStore->dereference(e) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (localMemStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    if (e.hittingRequiresCollapsing()) {
        // If we were writing this now-locally-idle entry, then we did not
        // finish and should now destroy an incomplete entry. Otherwise, do not
        // leave this idle StoreEntry behind because handleIMSReply() lacks
        // freshness checks when hitting a collapsed revalidation entry.
        keepInStoreTable = false; // may overrule fs decisions made above
    }

    return keepInStoreTable;
}

bool
Store::Controller::markedForDeletion(const cache_key *key) const
{
    // assuming a public key, checking Transients should cover all cases.
    return transients && transients->markedForDeletion(key);
}

bool
Store::Controller::markedForDeletionAndAbandoned(const StoreEntry &e) const
{
    // The opposite check order could miss a reader that has arrived after the
    // !readers() and before the markedForDeletion() check.
    return markedForDeletion(reinterpret_cast<const cache_key*>(e.key)) &&
           transients && !transients->readers(e);
}

bool
Store::Controller::hasReadableDiskEntry(const StoreEntry &e) const
{
    return disks->hasReadableEntry(e);
}

/// flags problematic entries before find() commits to finalizing/returning them
void
Store::Controller::checkFoundCandidate(const StoreEntry &entry) const
{
    checkTransients(entry);

    // The "hittingRequiresCollapsing() has an active writer" checks below
    // protect callers from getting stuck and/or from using a stale revalidation
    // reply. However, these protections are not reliable because the writer may
    // disappear at any time and/or without a trace. Collapsing adds risks...
    if (entry.hittingRequiresCollapsing()) {
        if (entry.hasTransients()) {
            // Too late to check here because the writer may be gone by now, but
            // Transients do check when they setCollapsingRequirement().
        } else {
            // a local writer must hold a lock on its writable entry
            if (!(entry.locked() && entry.isAccepting()))
                throw TextException("no local writer", Here());
        }
    }
}

StoreEntry *
Store::Controller::find(const cache_key *key)
{
    if (const auto entry = peek(key)) {
        try {
            if (!entry->key)
                allowSharing(*entry, key);
            checkFoundCandidate(*entry);
            entry->touch();
            referenceBusy(*entry);
            return entry;
        } catch (const std::exception &ex) {
            debugs(20, 2, "failed with " << *entry << ": " << ex.what());
            entry->release();
            // fall through
        }
    }
    return nullptr;
}

/// indexes and adds SMP-tracking for an ephemeral peek() result
void
Store::Controller::allowSharing(StoreEntry &entry, const cache_key *key)
{
    // anchorToCache() below and many find() callers expect a registered entry
    addReading(&entry, key);

    if (entry.hasTransients()) {
        // store hadWriter before computing `found`; \see Transients::get()
        const auto hadWriter = transients->hasWriter(entry);
        const auto found = anchorToCache(entry);
        if (!found) {
            // !found should imply hittingRequiresCollapsing() regardless of writer presence
            if (!entry.hittingRequiresCollapsing()) {
                debugs(20, DBG_IMPORTANT, "ERROR: Squid BUG: missing ENTRY_REQUIRES_COLLAPSING for " << entry);
                throw TextException("transients entry missing ENTRY_REQUIRES_COLLAPSING", Here());
            }

            if (!hadWriter) {
                // prevent others from falling into the same trap
                throw TextException("unattached transients entry missing writer", Here());
            }
        }
    }
}

StoreEntry *
Store::Controller::findCallbackXXX(const cache_key *key)
{
    // We could check for mem_obj presence (and more), moving and merging some
    // of the duplicated neighborsUdpAck() and neighborsHtcpReply() code here,
    // but that would mean polluting Store with HTCP/ICP code. Instead, we
    // should encapsulate callback-related data in a protocol-neutral MemObject
    // member or use an HTCP/ICP-specific index rather than store_table.

    // cannot reuse peekAtLocal() because HTCP/ICP callbacks may use private keys
    return static_cast<StoreEntry*>(hash_lookup(store_table, key));
}

/// \returns either an existing local reusable StoreEntry object or nil
/// To treat remotely marked entries specially,
/// callers ought to check markedForDeletion() first!
StoreEntry *
Store::Controller::peekAtLocal(const cache_key *key)
{
    if (StoreEntry *e = static_cast<StoreEntry*>(hash_lookup(store_table, key))) {
        // callers must only search for public entries
        assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
        assert(e->publicKey());
        checkTransients(*e);

        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        return e;
    }
    return nullptr;
}

StoreEntry *
Store::Controller::peek(const cache_key *key)
{
    debugs(20, 3, storeKeyText(key));

    if (markedForDeletion(key)) {
        debugs(20, 3, "ignoring marked in-transit " << storeKeyText(key));
        return nullptr;
    }

    if (StoreEntry *e = peekAtLocal(key)) {
        debugs(20, 3, "got local in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            return e;
        }
    }

    if (sharedMemStore) {
        if (StoreEntry *e = sharedMemStore->get(key)) {
            debugs(20, 3, "got mem-cached entry: " << *e);
            return e;
        }
    }

    if (const auto e = disks->get(key)) {
        debugs(20, 3, "got disk-cached entry: " << *e);
        return e;
    }

    debugs(20, 4, "cannot locate " << storeKeyText(key));
    return nullptr;
}

bool
Store::Controller::transientsReader(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isReader(e);
}

bool
Store::Controller::transientsWriter(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isWriter(e);
}

int64_t
Store::Controller::accumulateMore(StoreEntry &entry) const
{
    return disks->accumulateMore(entry);
    // The memory cache should not influence for-swapout accumulation decision.
}

// Must be called from StoreEntry::release() or releaseRequest() because
// those methods currently manage local indexing of StoreEntry objects.
// TODO: Replace StoreEntry::release*() with Root().evictCached().
void
Store::Controller::evictCached(StoreEntry &e)
{
    debugs(20, 7, e);
    if (transients)
        transients->evictCached(e);
    memoryEvictCached(e);
    disks->evictCached(e);
}

void
Store::Controller::evictIfFound(const cache_key *key)
{
    debugs(20, 7, storeKeyText(key));

    if (StoreEntry *entry = peekAtLocal(key)) {
        debugs(20, 5, "marking local in-transit " << *entry);
        entry->release(true);
        return;
    }

    if (sharedMemStore)
        sharedMemStore->evictIfFound(key);

    disks->evictIfFound(key);

    if (transients)
        transients->evictIfFound(key);
}

/// whether the memory cache is allowed to store that many additional pages
bool
Store::Controller::memoryCacheHasSpaceFor(const int pagesRequired) const
{
    // XXX: We count mem_nodes but may free shared memory pages instead.
    const auto fits = mem_node::InUseCount() + pagesRequired <= store_pages_max;
    debugs(20, 7, fits << ": " << mem_node::InUseCount() << '+' << pagesRequired << '?' << store_pages_max);
    return fits;
}

void
Store::Controller::freeMemorySpace(const int bytesRequired)
{
    const auto pagesRequired = (bytesRequired + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
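    // The expression above is integer ceiling division: with 4 KB pages,
    // 1 byte needs 1 page, 4096 bytes still 1 page, and 4097 bytes 2 pages.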

    if (memoryCacheHasSpaceFor(pagesRequired))
        return;

    // XXX: When store_pages_max is smaller than pagesRequired, we should not
    // look for more space (but we do because we want to abandon idle entries?).

    // limit our performance impact to one walk per second
    static time_t lastWalk = 0;
    if (lastWalk == squid_curtime)
        return;
    lastWalk = squid_curtime;

    debugs(20, 2, "need " << pagesRequired << " pages");

    // let abandon()/handleIdleEntry() know about the impending memory shortage
    memoryPagesDebt_ = pagesRequired;

    // XXX: SMP-unaware: Walkers should iterate memory cache, not store_table.
    // XXX: Limit iterations by time, not arbitrary count.
    const auto walker = mem_policy->PurgeInit(mem_policy, 100000);
    int removed = 0;
    while (const auto entry = walker->Next(walker)) {
        // Abandoned memory cache entries are purged during memory shortage.
        entry->abandon(__func__); // may delete entry
        ++removed;

        if (memoryCacheHasSpaceFor(pagesRequired))
            break;
    }
    // TODO: Move to RemovalPolicyWalker::Done() that has more/better details.
    debugs(20, 3, "removed " << removed << " out of " << hot_obj_count << " memory-cached entries");
    walker->Done(walker);
    memoryPagesDebt_ = 0;
}

// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
Store::Controller::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
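    // Illustration only: a reply with 2 KB loaded so far but an expected size
    // of 5 MB has ramSize = 5 MB and is rejected when ramLimit (e.g., a 4 MB
    // maximum_object_size_in_memory) is smaller; an unknown (negative)
    // expected size falls back to the bytes loaded so far.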
    return ramSize <= ramLimit;
}

void
Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (sharedMemStore)
        sharedMemStore->write(e); // leave keepInLocalMemory false
    else if (localMemStore)
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

/// removes the entry from the memory cache
/// XXX: Dangerous side effect: Unlocked entries lose their mem_obj.
void
Store::Controller::memoryEvictCached(StoreEntry &e)
{
    // TODO: Untangle memory caching from mem_obj.
    if (sharedMemStore)
        sharedMemStore->evictCached(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        if (!e.locked())
            e.destroyMemObject();
}

void
Store::Controller::memoryDisconnect(StoreEntry &e)
{
    if (sharedMemStore)
        sharedMemStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}

void
Store::Controller::noteStoppedSharedWriting(StoreEntry &e)
{
    if (transients && e.hasTransients()) // paranoid: the caller should check
        transients->completeWriting(e);
}

int
Store::Controller::transientReaders(const StoreEntry &e) const
{
    return (transients && e.hasTransients()) ?
           transients->readers(e) : 0;
}

void
Store::Controller::transientsDisconnect(StoreEntry &e)
{
    if (transients)
        transients->disconnect(e);
}

void
Store::Controller::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (sharedMemStore) {
        // leave keepInLocalMemory false; sharedMemStore maintains its own cache
    } else if (localMemStore) {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            memoryCacheHasSpaceFor(memoryPagesDebt_);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index should not stay in the global store_table.
    if (!dereferenceIdle(e, keepInLocalMemory)) {
        debugs(20, 5, "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, "keepInLocalMemory: " << keepInLocalMemory);

    // formerly known as "WARNING: found KEY_PRIVATE"
    assert(!EBIT_TEST(e.flags, KEY_PRIVATE));

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
        return;
    }

    // We know the in-memory data will be gone. Get rid of the entire entry if
    // it has nothing worth preserving on disk either.
    if (!e.swappedOut()) {
        e.release(); // deletes e
        return;
    }

    memoryEvictCached(e); // may already be gone
    // and keep the entry in store_table for its on-disk data
}

bool
Store::Controller::updateOnNotModified(StoreEntry *old, StoreEntry &e304)
{
    Must(old);
    Must(old->mem_obj);
    Must(e304.mem_obj);

    // updateOnNotModified() may be called many times for the same old entry.
    // e304.mem_obj->appliedUpdates value distinguishes two cases:
    //   false: Independent store clients revalidating the same old StoreEntry.
    //          Each such update uses its own e304. The old StoreEntry
    //          accumulates such independent updates.
    //   true: Store clients feeding off the same 304 response. Each such update
    //         uses the same e304. For timestamp correctness and for
    //         performance's sake, it is best to detect and skip such repeated
    //         update calls.
    if (e304.mem_obj->appliedUpdates) {
        debugs(20, 5, "ignored repeated update of " << *old << " with " << e304);
        return true;
    }
    e304.mem_obj->appliedUpdates = true;

    try {
        if (!old->updateOnNotModified(e304)) {
            debugs(20, 5, "updated nothing in " << *old << " with " << e304);
            return true;
        }
    } catch (...) {
        debugs(20, DBG_IMPORTANT, "ERROR: Failed to update a cached response: " << CurrentException);
        return false;
    }

    if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
        sharedMemStore->updateHeaders(old);

    if (old->swap_dirn > -1)
        disks->updateHeaders(old);

    return true;
}

bool
Store::Controller::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                   const HttpRequestMethod &)
{
    const KeyScope keyScope = reqFlags.refresh ? ksRevalidation : ksDefault;
    // set the flag now so that it gets copied into the Transients entry
    e->setCollapsingRequirement(true);
    if (e->makePublic(keyScope)) { // this is needed for both local and SMP collapsing
        debugs(20, 3, "may " << (transients && e->hasTransients() ?
                                 "SMP-" : "locally-") << "collapse " << *e);
        assert(e->hittingRequiresCollapsing());
        return true;
    }
    // paranoid cleanup; the flag is meaningless for private entries
    e->setCollapsingRequirement(false);
    return false;
}

void
Store::Controller::addReading(StoreEntry *e, const cache_key *key)
{
    if (transients)
        transients->monitorIo(e, key, Store::ioReading);
    e->hashInsert(key);
}

void
Store::Controller::addWriting(StoreEntry *e, const cache_key *key)
{
    assert(e);
    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return; // constant memory-resident entries do not need transients

    if (transients)
        transients->monitorIo(e, key, Store::ioWriting);
    // else: non-SMP configurations do not need transients
}

void
Store::Controller::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }

    if (!collapsed->locked()) {
        debugs(20, 3, "skipping (and may destroy) unlocked " << *collapsed);
        handleIdleEntry(*collapsed);
        return;
    }

    assert(collapsed->mem_obj);

    if (EBIT_TEST(collapsed->flags, ENTRY_ABORTED)) {
        debugs(20, 3, "skipping already aborted " << *collapsed);
        return;
    }

    debugs(20, 7, "syncing " << *collapsed);

    Transients::EntryStatus entryStatus;
    transients->status(*collapsed, entryStatus);

    if (entryStatus.waitingToBeFreed) {
        debugs(20, 3, "will release " << *collapsed << " due to waitingToBeFreed");
        collapsed->release(true); // may already be marked
    }

    if (transients->isWriter(*collapsed))
        return; // readers can only change our waitingToBeFreed flag

    assert(transients->isReader(*collapsed));

    bool found = false;
    bool inSync = false;
    if (sharedMemStore && collapsed->mem_obj->memCache.io == Store::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "already handled by memory store: " << *collapsed);
    } else if (sharedMemStore && collapsed->hasMemStore()) {
        found = true;
        inSync = sharedMemStore->updateAnchored(*collapsed);
        // TODO: handle entries attached to both memory and disk
    } else if (collapsed->hasDisk()) {
        found = true;
        inSync = disks->updateAnchored(*collapsed);
    } else {
        try {
            found = anchorToCache(*collapsed);
            inSync = found;
        } catch (...) {
            // TODO: Write an exception handler for the entire method.
            debugs(20, 3, "anchorToCache() failed for " << *collapsed << ": " << CurrentException);
            collapsed->abort();
            return;
        }
    }

    if (entryStatus.waitingToBeFreed && !found) {
        debugs(20, 3, "aborting unattached " << *collapsed <<
               " because it was marked for deletion before we could attach it");
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        assert(found);
        collapsed->setCollapsingRequirement(false);
        collapsed->invokeHandlers();
        return;
    }

    if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
        return;
    }

    if (!entryStatus.hasWriter) {
        debugs(20, 3, "aborting abandoned-by-writer " << *collapsed);
        collapsed->abort();
        return;
    }

    // the entry is still not in one of the caches
    debugs(20, 7, "waiting " << *collapsed);
    collapsed->setCollapsingRequirement(true);
}

/// If possible and not already done, associates the entry with its store(s).
/// \returns false for not-yet-cached entries that we may attach later
/// \returns true for other entries after synchronizing them with their store
bool
Store::Controller::anchorToCache(StoreEntry &entry)
{
    assert(entry.hasTransients());
    assert(transientsReader(entry));

    // TODO: Attach entries to both memory and disk

    // TODO: Reduce code duplication with syncCollapsed()
    if (sharedMemStore && entry.mem().memCache.io == Store::ioDone) {
        debugs(20, 5, "already handled by memory store: " << entry);
        return true;
    } else if (sharedMemStore && entry.hasMemStore()) {
        debugs(20, 5, "already anchored to memory store: " << entry);
        return true;
    } else if (entry.hasDisk()) {
        debugs(20, 5, "already anchored to disk: " << entry);
        return true;
    }

    debugs(20, 7, "anchoring " << entry);

    Transients::EntryStatus entryStatus;
    transients->status(entry, entryStatus);

    bool found = false;
    if (sharedMemStore)
        found = sharedMemStore->anchorToCache(entry);
    if (!found)
        found = disks->anchorToCache(entry);

    if (found) {
        debugs(20, 7, "anchored " << entry);
        entry.setCollapsingRequirement(false);
        return true;
    }

    if (entryStatus.waitingToBeFreed)
        throw TextException("will never be able to anchor to an already marked entry", Here());

    if (!entryStatus.hasWriter)
        throw TextException("will never be able to anchor to an abandoned-by-writer entry", Here());

    debugs(20, 7, "skipping not yet cached " << entry);
    entry.setCollapsingRequirement(true);
    return false;
}

bool
Store::Controller::SmpAware()
{
    return MemStore::Enabled() || Disks::SmpAware();
}

void
Store::Controller::checkTransients(const StoreEntry &e) const
{
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;
    assert(!transients || e.hasTransients());
}

Store::Controller&
Store::Root()
{
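    // Intentionally leaked: the singleton must outlive all users, and
    // ~Controller() asserts if it is ever invoked (see above).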
    static const auto root = new Controller();
    return *root;
}