/*
 * Copyright (C) 1996-2020 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20 Store Controller */

#include "squid.h"
#include "mem_node.h"
#include "MemStore.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "store/Controller.h"
#include "store/Disks.h"
#include "store/LocalSearch.h"
#include "tools.h"
#include "Transients.h"

#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int Store::Controller::store_dirs_rebuilding = 1;
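
// A hedged sketch of this counter's lifecycle (the authoritative bookkeeping
// lives in store_rebuild.cc): each cache_dir rebuild increments the counter
// when it starts and decrements it when it finishes, roughly:
//
//     ++Store::Controller::store_dirs_rebuilding; // rebuild scheduled
//     ...
//     --Store::Controller::store_dirs_rebuilding; // rebuild complete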

Store::Controller::Controller() :
    swapDir(new Disks),
    sharedMemStore(nullptr),
    localMemStore(false),
    transients(nullptr)
{
    assert(!store_table);
}

Store::Controller::~Controller()
{
    delete sharedMemStore;
    delete transients;
    delete swapDir;

    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = nullptr;
    }
}

void
Store::Controller::init()
{
    if (IamWorkerProcess()) {
        if (MemStore::Enabled()) {
            sharedMemStore = new MemStore;
            sharedMemStore->init();
        } else if (Config.memMaxSize > 0) {
            localMemStore = true;
        }
    }

    swapDir->init();

    if (Transients::Enabled() && IamWorkerProcess()) {
        transients = new Transients;
        transients->init();
    }
}

void
Store::Controller::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_
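    // Reap dead child processes (e.g., helpers forked while creating
    // cache_dirs) without blocking: WNOHANG returns immediately when no child
    // has exited, and a wait interrupted by a signal (EINTR) is retried.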
    pid_t pid;
    do {
        PidStatus status;
        pid = WaitForAnyPid(status, WNOHANG);
    } while (pid > 0 || (pid < 0 && errno == EINTR));
#endif
}

void
Store::Controller::maintain()
{
    static time_t last_warn_time = 0;

    PROF_start(storeMaintainSwapSpace);
    swapDir->maintain();

    /* this should be emitted by the oversize dir, not globally */

    if (Root().currentSize() > Store::Root().maxSize()) {
        if (squid_curtime - last_warn_time > 10) {
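            // rate-limited to one warning per 10 seconds; both sizes are
            // printed in KB (/ 1024.0 keeps a fraction, >> 10 divides by 1024)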
            debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
                   << Store::Root().currentSize() / 1024.0 << " KB > "
                   << (Store::Root().maxSize() >> 10) << " KB");
            last_warn_time = squid_curtime;
        }
    }

    PROF_stop(storeMaintainSwapSpace);
}

void
Store::Controller::getStats(StoreInfoStats &stats) const
{
    if (sharedMemStore)
        sharedMemStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        if (localMemStore) {
            // XXX: also count internal/in-transit objects
            stats.mem.count = hot_obj_count;
        } else {
            // XXX: count internal/in-transit objects instead
            stats.mem.count = hot_obj_count;
        }
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}

void
Store::Controller::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));
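
    // Sample output with hypothetical sizes:
    //     Store Entries          : 1024
    //     Maximum Swap Size      : 10485760 KB
    //     Current Store Swap Size: 524288.00 KB
    //     Current Capacity       : 5.00% used, 95.00% free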

    if (sharedMemStore)
        sharedMemStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
Store::Controller::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
Store::Controller::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
Store::Controller::currentSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->currentSize();
}

uint64_t
Store::Controller::currentCount() const
{
    /* TODO: include memory cache ? */
    return swapDir->currentCount();
}

int64_t
Store::Controller::maxObjectSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxObjectSize();
}

void
Store::Controller::updateLimits()
{
    swapDir->updateLimits();

    store_swap_high = (long) (((float) maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    store_pages_max = Config.memMaxSize / sizeof(mem_node);
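
    // Worked example with hypothetical settings: maxSize() == 100 GB with
    // cache_swap_high 95 and cache_swap_low 90 yields store_swap_high == 95 GB
    // and store_swap_low == 90 GB; cache_mem == 256 MB with a ~4 KB mem_node
    // yields store_pages_max of roughly 65536 pages.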

    // TODO: move this into a memory cache class when we have one
    const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
    const int64_t disksMax = swapDir ? swapDir->maxObjectSize() : 0;
    store_maxobjsize = std::max(disksMax, memMax);
}

StoreSearch *
Store::Controller::search()
{
    // this is the only kind of search we currently support
    return NewLocalSearch();
}

void
Store::Controller::sync(void)
{
    if (sharedMemStore)
        sharedMemStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available filesystems
 */
int
Store::Controller::callback()
{
    /* This will likely double count. That's OK. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}

/// update reference counters of the recently touched entry
void
Store::Controller::referenceBusy(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.hasDisk())
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        sharedMemStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

/// dereference()s an idle entry
/// \returns false if and only if the entry should be deleted
bool
Store::Controller::dereferenceIdle(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    /* Notify the fs that we're not referencing this object any more */

    if (e.hasDisk())
        keepInStoreTable = swapDir->dereference(e) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = sharedMemStore->dereference(e) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (localMemStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    return keepInStoreTable;
}

bool
Store::Controller::markedForDeletion(const cache_key *key) const
{
    // Assuming a public key, checking Transients should cover all cases.
    return transients && transients->markedForDeletion(key);
}

bool
Store::Controller::markedForDeletionAndAbandoned(const StoreEntry &e) const
{
    // The opposite check order could miss a reader that arrives after the
    // !readers() check and before the markedForDeletion() check.
    return markedForDeletion(reinterpret_cast<const cache_key*>(e.key)) &&
           transients && !transients->readers(e);
}

bool
Store::Controller::hasReadableDiskEntry(const StoreEntry &e) const
{
    return swapDir->hasReadableEntry(e);
}

StoreEntry *
Store::Controller::find(const cache_key *key)
{
    if (const auto entry = peek(key)) {
        try {
            if (!entry->key)
                allowSharing(*entry, key);
            checkTransients(*entry);
            entry->touch();
            referenceBusy(*entry);
            return entry;
        } catch (const std::exception &ex) {
            debugs(20, 2, "failed with " << *entry << ": " << ex.what());
            entry->release(true);
            // fall through
        }
    }
    return nullptr;
}

/// indexes and adds SMP-tracking for an ephemeral peek() result
void
Store::Controller::allowSharing(StoreEntry &entry, const cache_key *key)
{
    // TODO: refactor to throw on anchorToCache() inSync errors!

    // anchorToCache() below and many find() callers expect a registered entry
    addReading(&entry, key);

    if (entry.hasTransients()) {
        bool inSync = false;
        const bool found = anchorToCache(entry, inSync);
        if (found && !inSync)
            throw TexcHere("cannot sync");
    }
}

StoreEntry *
Store::Controller::findCallbackXXX(const cache_key *key)
{
    // We could check for mem_obj presence (and more), moving and merging some
    // of the duplicated neighborsUdpAck() and neighborsHtcpReply() code here,
    // but that would mean polluting Store with HTCP/ICP code. Instead, we
    // should encapsulate callback-related data in a protocol-neutral MemObject
    // member or use an HTCP/ICP-specific index rather than store_table.

    // cannot reuse peekAtLocal() because HTCP/ICP callbacks may use private keys
    return static_cast<StoreEntry*>(hash_lookup(store_table, key));
}

/// \returns either an existing local reusable StoreEntry object or nil
/// To treat remotely marked entries specially,
/// callers ought to check markedForDeletion() first!
StoreEntry *
Store::Controller::peekAtLocal(const cache_key *key)
{
    if (StoreEntry *e = static_cast<StoreEntry*>(hash_lookup(store_table, key))) {
        // callers must only search for public entries
        assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
        assert(e->publicKey());
        checkTransients(*e);

        // TODO: ignore and maybe handleIdleEntry() unlocked in-transit entries
        // because their backing store slot may be gone already.
        return e;
    }
    return nullptr;
}

StoreEntry *
Store::Controller::peek(const cache_key *key)
{
    debugs(20, 3, storeKeyText(key));

    if (markedForDeletion(key)) {
        debugs(20, 3, "ignoring marked in-transit " << storeKeyText(key));
        return nullptr;
    }

    if (StoreEntry *e = peekAtLocal(key)) {
        debugs(20, 3, "got local in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            return e;
        }
    }

    if (sharedMemStore) {
        if (StoreEntry *e = sharedMemStore->get(key)) {
            debugs(20, 3, "got mem-cached entry: " << *e);
            return e;
        }
    }

    if (swapDir) {
        if (StoreEntry *e = swapDir->get(key)) {
            debugs(20, 3, "got disk-cached entry: " << *e);
            return e;
        }
    }

    debugs(20, 4, "cannot locate " << storeKeyText(key));
    return nullptr;
}

bool
Store::Controller::transientsReader(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isReader(e);
}

bool
Store::Controller::transientsWriter(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isWriter(e);
}

int64_t
Store::Controller::accumulateMore(StoreEntry &entry) const
{
    return swapDir ? swapDir->accumulateMore(entry) : 0;
    // The memory cache should not influence for-swapout accumulation decision.
}

// Must be called from StoreEntry::release() or releaseRequest() because
// those methods currently manage local indexing of StoreEntry objects.
// TODO: Replace StoreEntry::release*() with Root().evictCached().
void
Store::Controller::evictCached(StoreEntry &e)
{
    debugs(20, 7, e);
    if (transients)
        transients->evictCached(e);
    memoryEvictCached(e);
    if (swapDir)
        swapDir->evictCached(e);
}

void
Store::Controller::evictIfFound(const cache_key *key)
{
    debugs(20, 7, storeKeyText(key));

    if (StoreEntry *entry = peekAtLocal(key)) {
        debugs(20, 5, "marking local in-transit " << *entry);
        entry->release(true);
        return;
    }

    if (sharedMemStore)
        sharedMemStore->evictIfFound(key);
    if (swapDir)
        swapDir->evictIfFound(key);
    if (transients)
        transients->evictIfFound(key);
}

/// whether the memory cache is allowed to store that many additional pages
bool
Store::Controller::memoryCacheHasSpaceFor(const int pagesRequired) const
{
    // XXX: We count mem_nodes but may free shared memory pages instead.
    const auto fits = mem_node::InUseCount() + pagesRequired <= store_pages_max;
    debugs(20, 7, fits << ": " << mem_node::InUseCount() << '+' << pagesRequired << '?' << store_pages_max);
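    // e.g., "0: 131070+16?131072" reads: 131070 nodes in use plus 16 more
    // requested pages would exceed the 131072-page limit (hypothetical numbers)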
    return fits;
}

void
Store::Controller::freeMemorySpace(const int bytesRequired)
{
    const auto pagesRequired = (bytesRequired + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;
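    // ceiling division: e.g., with a 4 KB SM_PAGE_SIZE, 1..4096 bytes need
    // one page and 4097 bytes need two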

    if (memoryCacheHasSpaceFor(pagesRequired))
        return;

    // XXX: When store_pages_max is smaller than pagesRequired, we should not
    // look for more space (but we do because we want to abandon idle entries?).

    // limit our performance impact to one walk per second
    static time_t lastWalk = 0;
    if (lastWalk == squid_curtime)
        return;
    lastWalk = squid_curtime;

    debugs(20, 2, "need " << pagesRequired << " pages");

    // let abandon()/handleIdleEntry() know about the impending memory shortage
    memoryPagesDebt_ = pagesRequired;

    // XXX: SMP-unaware: Walkers should iterate memory cache, not store_table.
    // XXX: Limit iterations by time, not arbitrary count.
    const auto walker = mem_policy->PurgeInit(mem_policy, 100000);
    int removed = 0;
    while (const auto entry = walker->Next(walker)) {
        // Abandoned memory cache entries are purged during memory shortage.
        entry->abandon(__FUNCTION__); // may delete entry
        ++removed;

        if (memoryCacheHasSpaceFor(pagesRequired))
            break;
    }
    // TODO: Move to RemovalPolicyWalker::Done() that has more/better details.
    debugs(20, 3, "removed " << removed << " out of " << hot_obj_count << " memory-cached entries");
    walker->Done(walker);
    memoryPagesDebt_ = 0;
}

// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
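/// For example (hypothetical numbers): an entry expecting a 300 KB reply is
/// kept only if both cache_mem and maximum_object_size_in_memory are at least
/// 300 KB; a negative expectedReplySize() defers to the already-loaded size.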
bool
Store::Controller::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // do the current and expected sizes obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}

void
Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (sharedMemStore)
        sharedMemStore->write(e); // leave keepInLocalMemory false
    else if (localMemStore)
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

/// removes the entry from the memory cache
/// XXX: Dangerous side effect: Unlocked entries lose their mem_obj.
void
Store::Controller::memoryEvictCached(StoreEntry &e)
{
    // TODO: Untangle memory caching from mem_obj.
    if (sharedMemStore)
        sharedMemStore->evictCached(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        if (!e.locked())
            e.destroyMemObject();
}

void
Store::Controller::memoryDisconnect(StoreEntry &e)
{
    if (sharedMemStore)
        sharedMemStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}

void
Store::Controller::stopSharing(StoreEntry &e)
{
    // Marking the transients entry is sufficient to prevent new readers from
    // starting to wait for `e` updates and to inform the current readers (and,
    // hence, Broadcast() recipients) about the underlying Store problems.
    if (transients && e.hasTransients())
        transients->evictCached(e);
}

void
Store::Controller::transientsCompleteWriting(StoreEntry &e)
{
    // transients->isWriter(e) is false if `e` is writing to its second store
    // after finishing writing to its first store: At the end of the first swap
    // out, the transients writer becomes a reader and (XXX) we never switch
    // back to writing, even if we start swapping out again (to another store).
    if (transients && e.hasTransients() && transients->isWriter(e))
        transients->completeWriting(e);
}

int
Store::Controller::transientReaders(const StoreEntry &e) const
{
    return (transients && e.hasTransients()) ?
           transients->readers(e) : 0;
}

void
Store::Controller::transientsDisconnect(StoreEntry &e)
{
    if (transients)
        transients->disconnect(e);
}

void
Store::Controller::transientsClearCollapsingRequirement(StoreEntry &e)
{
    if (transients)
        transients->clearCollapsingRequirement(e);
}

void
Store::Controller::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (sharedMemStore) {
        // leave keepInLocalMemory false; sharedMemStore maintains its own cache
    } else if (localMemStore) {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            memoryCacheHasSpaceFor(memoryPagesDebt_);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index should not stay in the global store_table.
    if (!dereferenceIdle(e, keepInLocalMemory)) {
        debugs(20, 5, "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
        return;
    }

    // We know the in-memory data will be gone. Get rid of the entire entry if
    // it has nothing worth preserving on disk either.
    if (!e.swappedOut()) {
        e.release(); // deletes e
        return;
    }

    memoryEvictCached(e); // may already be gone
    // and keep the entry in store_table for its on-disk data
}

void
Store::Controller::updateOnNotModified(StoreEntry *old, StoreEntry &e304)
{
    Must(old);
    Must(old->mem_obj);
    Must(e304.mem_obj);

    // updateOnNotModified() may be called many times for the same old entry.
    // e304.mem_obj->appliedUpdates value distinguishes two cases:
    //   false: Independent store clients revalidating the same old StoreEntry.
    //          Each such update uses its own e304. The old StoreEntry
    //          accumulates such independent updates.
    //   true: Store clients feeding off the same 304 response. Each such update
    //         uses the same e304. For timestamp correctness and performance's
    //         sake, it is best to detect and skip such repeated update calls.
    if (e304.mem_obj->appliedUpdates) {
        debugs(20, 5, "ignored repeated update of " << *old << " with " << e304);
        return;
    }
    e304.mem_obj->appliedUpdates = true;

    if (!old->updateOnNotModified(e304)) {
        debugs(20, 5, "updated nothing in " << *old << " with " << e304);
        return;
    }

    if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
        sharedMemStore->updateHeaders(old);

    if (old->swap_dirn > -1)
        swapDir->updateHeaders(old);
}
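
// For example (a hypothetical sequence): two clients independently revalidate
// the same cached object; each 304 reply arrives with its own e304, so `old`
// accumulates both updates. A third client feeding off the second 304 reuses
// that e304, and its appliedUpdates flag turns the repeated call into a no-op.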

bool
Store::Controller::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                   const HttpRequestMethod &reqMethod)
{
    const KeyScope keyScope = reqFlags.refresh ? ksRevalidation : ksDefault;
    // set the flag now so that it gets copied into the Transients entry
    e->setCollapsingRequirement(true);
    if (e->makePublic(keyScope)) { // this is needed for both local and SMP collapsing
        debugs(20, 3, "may " << (transients && e->hasTransients() ?
                                 "SMP-" : "locally-") << "collapse " << *e);
        return true;
    }
    // paranoid cleanup; the flag is meaningless for private entries
    e->setCollapsingRequirement(false);
    return false;
}

void
Store::Controller::addReading(StoreEntry *e, const cache_key *key)
{
    if (transients)
        transients->monitorIo(e, key, Store::ioReading);
    e->hashInsert(key);
}

void
Store::Controller::addWriting(StoreEntry *e, const cache_key *key)
{
    assert(e);
    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return; // constant memory-resident entries do not need transients

    if (transients)
        transients->monitorIo(e, key, Store::ioWriting);
    // else: non-SMP configurations do not need transients
}

void
Store::Controller::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }

    if (!collapsed->locked()) {
        debugs(20, 3, "skipping (and may destroy) unlocked " << *collapsed);
        handleIdleEntry(*collapsed);
        return;
    }

    assert(collapsed->mem_obj);

    if (EBIT_TEST(collapsed->flags, ENTRY_ABORTED)) {
        debugs(20, 3, "skipping already aborted " << *collapsed);
        return;
    }

    debugs(20, 7, "syncing " << *collapsed);

    Transients::EntryStatus entryStatus;
    transients->status(*collapsed, entryStatus);

    if (!entryStatus.collapsed) {
        debugs(20, 5, "removing collapsing requirement for " << *collapsed << " since remote writer probably got headers");
        collapsed->setCollapsingRequirement(false);
    }

    if (entryStatus.waitingToBeFreed) {
        debugs(20, 3, "will release " << *collapsed << " due to waitingToBeFreed");
        collapsed->release(true); // may already be marked
    }

    if (transients->isWriter(*collapsed))
        return; // readers can only change our waitingToBeFreed flag

    assert(transients->isReader(*collapsed));

    if (entryStatus.abortedByWriter) {
        debugs(20, 3, "aborting " << *collapsed << " because its writer has aborted");
        collapsed->abort();
        return;
    }

    if (entryStatus.collapsed && !collapsed->hittingRequiresCollapsing()) {
        debugs(20, 3, "aborting " << *collapsed << " due to writer/reader collapsing state mismatch");
        collapsed->abort();
        return;
    }

    bool found = false;
    bool inSync = false;
    if (sharedMemStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "fully mem-loaded " << *collapsed);
    } else if (sharedMemStore && collapsed->hasMemStore()) {
        found = true;
        inSync = sharedMemStore->updateAnchored(*collapsed);
        // TODO: handle entries attached to both memory and disk
    } else if (swapDir && collapsed->hasDisk()) {
        found = true;
        inSync = swapDir->updateAnchored(*collapsed);
    } else {
        found = anchorToCache(*collapsed, inSync);
    }

    if (entryStatus.waitingToBeFreed && !found) {
        debugs(20, 3, "aborting unattached " << *collapsed <<
               " because it was marked for deletion before we could attach it");
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        collapsed->invokeHandlers();
        return;
    }

    if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
        return;
    }

    // the entry is still not in one of the caches
    debugs(20, 7, "waiting " << *collapsed);
}

/// Called for Transients entries that are not yet anchored to a cache.
/// For cached entries, return true after synchronizing them with their cache
/// (making inSync true on success). For not-yet-cached entries, return false.
bool
Store::Controller::anchorToCache(StoreEntry &entry, bool &inSync)
{
    assert(entry.hasTransients());
    assert(transientsReader(entry));

    debugs(20, 7, "anchoring " << entry);

    bool found = false;
    if (sharedMemStore)
        found = sharedMemStore->anchorToCache(entry, inSync);
    if (!found && swapDir)
        found = swapDir->anchorToCache(entry, inSync);

    if (found) {
        if (inSync)
            debugs(20, 7, "anchored " << entry);
        else
            debugs(20, 5, "failed to anchor " << entry);
    } else {
        debugs(20, 7, "skipping not yet cached " << entry);
    }

    return found;
}

bool
Store::Controller::SmpAware()
{
    return MemStore::Enabled() || Disks::SmpAware();
}

void
Store::Controller::checkTransients(const StoreEntry &e) const
{
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;
    assert(!transients || e.hasTransients());
}

namespace Store {
static RefCount<Controller> TheRoot;
}

Store::Controller&
Store::Root()
{
    assert(TheRoot);
    return *TheRoot;
}

void
Store::Init(Controller *root)
{
    TheRoot = root ? root : new Controller;
}

void
Store::FreeMemory()
{
    TheRoot = nullptr;
}

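// Typical singleton lifecycle (a hedged sketch; see main.cc and store.cc for
// the real call sequence):
//     Store::Init();        // startup: install the default Controller
//     Store::Root().init(); // open and index the configured caches
//     ...                   // use Store::Root() throughout the run
//     Store::FreeMemory();  // shutdown: drop the Controller reference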