/*
 * Copyright (C) 1996-2022 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20 Store Controller */

#include "squid.h"
#include "mem_node.h"
#include "MemStore.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "store/Controller.h"
#include "store/Disks.h"
#include "store/LocalSearch.h"
#include "tools.h"
#include "Transients.h"

#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int Store::Controller::store_dirs_rebuilding = 1;

Store::Controller::Controller() :
    swapDir(new Disks),
    sharedMemStore(nullptr),
    localMemStore(false),
    transients(nullptr)
{
    assert(!store_table);
}

Store::Controller::~Controller()
{
    delete sharedMemStore;
    delete transients;
    delete swapDir;

    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = nullptr;
    }
}

void
Store::Controller::init()
{
    if (IamWorkerProcess()) {
        if (MemStore::Enabled()) {
            sharedMemStore = new MemStore;
            sharedMemStore->init();
        } else if (Config.memMaxSize > 0) {
            localMemStore = true;
        }
    }

    swapDir->init();

    if (Transients::Enabled() && IamWorkerProcess()) {
        transients = new Transients;
        transients->init();
    }
}

void
Store::Controller::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_
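    // Reap any already-exited child processes (presumably kids forked while
    // creating missing cache_dir structures), without blocking (WNOHANG) and
    // retrying waits interrupted by signals (EINTR).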
    pid_t pid;
    do {
        PidStatus status;
        pid = WaitForAnyPid(status, WNOHANG);
    } while (pid > 0 || (pid < 0 && errno == EINTR));
#endif
}

void
Store::Controller::maintain()
{
    static time_t last_warn_time = 0;

    swapDir->maintain();

    /* this should be emitted by the oversize dir, not globally */

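    // rate-limit the warning to at most once every 10 seconds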
    if (Root().currentSize() > Store::Root().maxSize()) {
        if (squid_curtime - last_warn_time > 10) {
            debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
                   << Store::Root().currentSize() / 1024.0 << " KB > "
                   << (Store::Root().maxSize() >> 10) << " KB");
            last_warn_time = squid_curtime;
        }
    }
}

void
Store::Controller::getStats(StoreInfoStats &stats) const
{
    if (sharedMemStore)
        sharedMemStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        if (localMemStore) {
            // XXX: also count internal/in-transit objects
            stats.mem.count = hot_obj_count;
        } else {
            // XXX: count internal/in-transit objects instead
            stats.mem.count = hot_obj_count;
        }
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}

void
Store::Controller::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries          : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size      : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity       : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (sharedMemStore)
        sharedMemStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
Store::Controller::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
Store::Controller::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
Store::Controller::currentSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->currentSize();
}

uint64_t
Store::Controller::currentCount() const
{
    /* TODO: include memory cache ? */
    return swapDir->currentCount();
}

int64_t
Store::Controller::maxObjectSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxObjectSize();
}

void
Store::Controller::configure()
{
    swapDir->configure();
    store_swap_high = (long) (((float) maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    store_pages_max = Config.memMaxSize / sizeof(mem_node);
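    // Each mem_node holds roughly one SM_PAGE_SIZE page of object data, so
    // store_pages_max caps how many such in-RAM pages may be in use.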

    // TODO: move this into a memory cache class when we have one
    const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
    const int64_t disksMax = swapDir ? swapDir->maxObjectSize() : 0;
    store_maxobjsize = std::max(disksMax, memMax);
}

StoreSearch *
Store::Controller::search()
{
    // this is the only kind of search we currently support
    return NewLocalSearch();
}

void
Store::Controller::sync()
{
    if (sharedMemStore)
        sharedMemStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available filesystems
 */
int
Store::Controller::callback()
{
    /* mem cache callbacks ? */
    return swapDir->callback();
}

/// update reference counters of the recently touched entry
void
Store::Controller::referenceBusy(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.hasDisk())
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        sharedMemStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

/// dereference()s an idle entry
/// \returns false if and only if the entry should be deleted
bool
Store::Controller::dereferenceIdle(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    // idle private entries cannot be reused
    if (EBIT_TEST(e.flags, KEY_PRIVATE))
        return false;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    // Notify the fs that we are not referencing this object any more. This
    // should be done even if we overwrite keepInStoreTable afterwards.

    if (e.hasDisk())
        keepInStoreTable = swapDir->dereference(e) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = sharedMemStore->dereference(e) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (localMemStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    if (e.hittingRequiresCollapsing()) {
        // If we were writing this now-locally-idle entry, then we did not
        // finish and should now destroy an incomplete entry. Otherwise, do not
        // leave this idle StoreEntry behind because handleIMSReply() lacks
        // freshness checks when hitting a collapsed revalidation entry.
        keepInStoreTable = false; // may overrule fs decisions made above
    }

    return keepInStoreTable;
}

bool
Store::Controller::markedForDeletion(const cache_key *key) const
{
    // assuming a public key, checking Transients should cover all cases.
    return transients && transients->markedForDeletion(key);
}

bool
Store::Controller::markedForDeletionAndAbandoned(const StoreEntry &e) const
{
    // The opposite check order could miss a reader that has arrived after the
    // !readers() and before the markedForDeletion() check.
    return markedForDeletion(reinterpret_cast<const cache_key*>(e.key)) &&
           transients && !transients->readers(e);
}

bool
Store::Controller::hasReadableDiskEntry(const StoreEntry &e) const
{
    return swapDir->hasReadableEntry(e);
}

/// flags problematic entries before find() commits to finalizing/returning them
void
Store::Controller::checkFoundCandidate(const StoreEntry &entry) const
{
    checkTransients(entry);

    // The "hittingRequiresCollapsing() has an active writer" checks below
    // protect callers from getting stuck and/or from using a stale revalidation
    // reply. However, these protections are not reliable because the writer may
    // disappear at any time and/or without a trace. Collapsing adds risks...
    if (entry.hittingRequiresCollapsing()) {
        if (entry.hasTransients()) {
            // Too late to check here because the writer may be gone by now, but
            // Transients do check when they setCollapsingRequirement().
        } else {
            // a local writer must hold a lock on its writable entry
            if (!(entry.locked() && entry.isAccepting()))
                throw TextException("no local writer", Here());
        }
    }
}

StoreEntry *
Store::Controller::find(const cache_key *key)
{
    if (const auto entry = peek(key)) {
        try {
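            // Entries peeked from shared caches arrive without a local
            // store_table registration (no key yet); index them before use.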
            if (!entry->key)
                allowSharing(*entry, key);
            checkFoundCandidate(*entry);
            entry->touch();
            referenceBusy(*entry);
            return entry;
        } catch (const std::exception &ex) {
            debugs(20, 2, "failed with " << *entry << ": " << ex.what());
            entry->release();
            // fall through
        }
    }
    return nullptr;
}

/// indexes and adds SMP-tracking for an ephemeral peek() result
void
Store::Controller::allowSharing(StoreEntry &entry, const cache_key *key)
{
    // TODO: refactor to throw on anchorToCache() inSync errors!

    // anchorToCache() below and many find() callers expect a registered entry
    addReading(&entry, key);

    if (entry.hasTransients()) {
        // store hadWriter before computing `found`; \see Transients::get()
        const auto hadWriter = transients->hasWriter(entry);
        bool inSync = false;
        const bool found = anchorToCache(entry, inSync);
        if (found && !inSync)
            throw TexcHere("cannot sync");
        if (!found) {
            // !found should imply hittingRequiresCollapsing() regardless of writer presence
            if (!entry.hittingRequiresCollapsing()) {
                debugs(20, DBG_IMPORTANT, "ERROR: Squid BUG: missing ENTRY_REQUIRES_COLLAPSING for " << entry);
                throw TextException("transients entry missing ENTRY_REQUIRES_COLLAPSING", Here());
            }

            if (!hadWriter) {
                // prevent others from falling into the same trap
                throw TextException("unattached transients entry missing writer", Here());
            }
        }
    }
}

StoreEntry *
Store::Controller::findCallbackXXX(const cache_key *key)
{
    // We could check for mem_obj presence (and more), moving and merging some
    // of the duplicated neighborsUdpAck() and neighborsHtcpReply() code here,
    // but that would mean polluting Store with HTCP/ICP code. Instead, we
    // should encapsulate callback-related data in a protocol-neutral MemObject
    // member or use an HTCP/ICP-specific index rather than store_table.

    // cannot reuse peekAtLocal() because HTCP/ICP callbacks may use private keys
    return static_cast<StoreEntry*>(hash_lookup(store_table, key));
}

/// \returns either an existing local reusable StoreEntry object or nil
/// To treat remotely marked entries specially,
/// callers ought to check markedForDeletion() first!
StoreEntry *
Store::Controller::peekAtLocal(const cache_key *key)
{
    if (StoreEntry *e = static_cast<StoreEntry*>(hash_lookup(store_table, key))) {
        // callers must only search for public entries
        assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
        assert(e->publicKey());
        checkTransients(*e);

        // TODO: ignore and maybe handleIdleEntry() unlocked in-transit entries
        // because their backing store slot may be gone already.
        return e;
    }
    return nullptr;
}

StoreEntry *
Store::Controller::peek(const cache_key *key)
{
    debugs(20, 3, storeKeyText(key));

    if (markedForDeletion(key)) {
        debugs(20, 3, "ignoring marked in-transit " << storeKeyText(key));
        return nullptr;
    }

    if (StoreEntry *e = peekAtLocal(key)) {
        debugs(20, 3, "got local in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            return e;
        }
    }

    if (sharedMemStore) {
        if (StoreEntry *e = sharedMemStore->get(key)) {
            debugs(20, 3, "got mem-cached entry: " << *e);
            return e;
        }
    }

    if (swapDir) {
        if (StoreEntry *e = swapDir->get(key)) {
            debugs(20, 3, "got disk-cached entry: " << *e);
            return e;
        }
    }

    debugs(20, 4, "cannot locate " << storeKeyText(key));
    return nullptr;
}

bool
Store::Controller::transientsReader(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isReader(e);
}

bool
Store::Controller::transientsWriter(const StoreEntry &e) const
{
    return transients && e.hasTransients() && transients->isWriter(e);
}

int64_t
Store::Controller::accumulateMore(StoreEntry &entry) const
{
    return swapDir ? swapDir->accumulateMore(entry) : 0;
    // The memory cache should not influence the for-swapout accumulation decision.
}

// Must be called from StoreEntry::release() or releaseRequest() because
// those methods currently manage local indexing of StoreEntry objects.
// TODO: Replace StoreEntry::release*() with Root().evictCached().
void
Store::Controller::evictCached(StoreEntry &e)
{
    debugs(20, 7, e);
    if (transients)
        transients->evictCached(e);
    memoryEvictCached(e);
    if (swapDir)
        swapDir->evictCached(e);
}

void
Store::Controller::evictIfFound(const cache_key *key)
{
    debugs(20, 7, storeKeyText(key));

    if (StoreEntry *entry = peekAtLocal(key)) {
        debugs(20, 5, "marking local in-transit " << *entry);
        entry->release(true);
        return;
    }

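    // Also check stores that maintain their own indexes: the entry may be
    // cached there without any store_table registration.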
    if (sharedMemStore)
        sharedMemStore->evictIfFound(key);
    if (swapDir)
        swapDir->evictIfFound(key);
    if (transients)
        transients->evictIfFound(key);
}

/// whether the memory cache is allowed to store that many additional pages
bool
Store::Controller::memoryCacheHasSpaceFor(const int pagesRequired) const
{
    // XXX: We count mem_nodes but may free shared memory pages instead.
    const auto fits = mem_node::InUseCount() + pagesRequired <= store_pages_max;
    debugs(20, 7, fits << ": " << mem_node::InUseCount() << '+' << pagesRequired << '?' << store_pages_max);
    return fits;
}

void
Store::Controller::freeMemorySpace(const int bytesRequired)
{
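    // round up: a final partial page still requires a whole page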
    const auto pagesRequired = (bytesRequired + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;

    if (memoryCacheHasSpaceFor(pagesRequired))
        return;

    // XXX: When store_pages_max is smaller than pagesRequired, we should not
    // look for more space (but we do because we want to abandon idle entries?).

    // limit our performance impact to one walk per second
    static time_t lastWalk = 0;
    if (lastWalk == squid_curtime)
        return;
    lastWalk = squid_curtime;

    debugs(20, 2, "need " << pagesRequired << " pages");

    // let abandon()/handleIdleEntry() know about the impending memory shortage
    memoryPagesDebt_ = pagesRequired;

    // XXX: SMP-unaware: Walkers should iterate memory cache, not store_table.
    // XXX: Limit iterations by time, not arbitrary count.
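    // walk at most 100000 entries per call (the PurgeInit() scan limit)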
    const auto walker = mem_policy->PurgeInit(mem_policy, 100000);
    int removed = 0;
    while (const auto entry = walker->Next(walker)) {
        // Abandoned memory cache entries are purged during memory shortage.
        entry->abandon(__FUNCTION__); // may delete entry
        ++removed;

        if (memoryCacheHasSpaceFor(pagesRequired))
            break;
    }
    // TODO: Move to RemovalPolicyWalker::Done() that has more/better details.
    debugs(20, 3, "removed " << removed << " out of " << hot_obj_count << " memory-cached entries");
    walker->Done(walker);
    memoryPagesDebt_ = 0;
}

// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
Store::Controller::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
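    // when the expected size is unknown (negative), rely on the loaded size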
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}

void
Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (sharedMemStore)
        sharedMemStore->write(e); // leave keepInLocalMemory false
    else if (localMemStore)
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

/// removes the entry from the memory cache
/// XXX: Dangerous side effect: Unlocked entries lose their mem_obj.
void
Store::Controller::memoryEvictCached(StoreEntry &e)
{
    // TODO: Untangle memory caching from mem_obj.
    if (sharedMemStore)
        sharedMemStore->evictCached(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        if (!e.locked())
            e.destroyMemObject();
}

void
Store::Controller::memoryDisconnect(StoreEntry &e)
{
    if (sharedMemStore)
        sharedMemStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}

void
Store::Controller::noteStoppedSharedWriting(StoreEntry &e)
{
    if (transients && e.hasTransients()) // paranoid: the caller should check
        transients->completeWriting(e);
}

int
Store::Controller::transientReaders(const StoreEntry &e) const
{
    return (transients && e.hasTransients()) ?
           transients->readers(e) : 0;
}

void
Store::Controller::transientsDisconnect(StoreEntry &e)
{
    if (transients)
        transients->disconnect(e);
}

void
Store::Controller::transientsClearCollapsingRequirement(StoreEntry &e)
{
    if (transients)
        transients->clearCollapsingRequirement(e);
}

void
Store::Controller::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (sharedMemStore) {
        // leave keepInLocalMemory false; sharedMemStore maintains its own cache
    } else if (localMemStore) {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            memoryCacheHasSpaceFor(memoryPagesDebt_);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index should not stay in the global store_table.
    if (!dereferenceIdle(e, keepInLocalMemory)) {
        debugs(20, 5, "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, "keepInLocalMemory: " << keepInLocalMemory);

    // formerly known as "WARNING: found KEY_PRIVATE"
    assert(!EBIT_TEST(e.flags, KEY_PRIVATE));

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
        return;
    }

    // We know the in-memory data will be gone. Get rid of the entire entry if
    // it has nothing worth preserving on disk either.
    if (!e.swappedOut()) {
        e.release(); // deletes e
        return;
    }

    memoryEvictCached(e); // may already be gone
    // and keep the entry in store_table for its on-disk data
}

void
Store::Controller::updateOnNotModified(StoreEntry *old, StoreEntry &e304)
{
    Must(old);
    Must(old->mem_obj);
    Must(e304.mem_obj);

    // updateOnNotModified() may be called many times for the same old entry.
    // e304.mem_obj->appliedUpdates value distinguishes two cases:
    //   false: Independent store clients revalidating the same old StoreEntry.
    //          Each such update uses its own e304. The old StoreEntry
    //          accumulates such independent updates.
    //   true: Store clients feeding off the same 304 response. Each such
    //         update uses the same e304. For the sake of timestamp correctness
    //         and performance, it is best to detect and skip such repeated
    //         update calls.
    if (e304.mem_obj->appliedUpdates) {
        debugs(20, 5, "ignored repeated update of " << *old << " with " << e304);
        return;
    }
    e304.mem_obj->appliedUpdates = true;

    if (!old->updateOnNotModified(e304)) {
        debugs(20, 5, "updated nothing in " << *old << " with " << e304);
        return;
    }

    if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
        sharedMemStore->updateHeaders(old);

    if (old->swap_dirn > -1)
        swapDir->updateHeaders(old);
}

bool
Store::Controller::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                   const HttpRequestMethod &)
{
    const KeyScope keyScope = reqFlags.refresh ? ksRevalidation : ksDefault;
    // set the flag now so that it gets copied into the Transients entry
    e->setCollapsingRequirement(true);
    if (e->makePublic(keyScope)) { // this is needed for both local and SMP collapsing
        debugs(20, 3, "may " << (transients && e->hasTransients() ?
                                 "SMP-" : "locally-") << "collapse " << *e);
        assert(e->hittingRequiresCollapsing());
        return true;
    }
    // paranoid cleanup; the flag is meaningless for private entries
    e->setCollapsingRequirement(false);
    return false;
}

void
Store::Controller::addReading(StoreEntry *e, const cache_key *key)
{
    if (transients)
        transients->monitorIo(e, key, Store::ioReading);
    e->hashInsert(key);
}

void
Store::Controller::addWriting(StoreEntry *e, const cache_key *key)
{
    assert(e);
    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return; // constant memory-resident entries do not need transients

    if (transients)
        transients->monitorIo(e, key, Store::ioWriting);
    // else: non-SMP configurations do not need transients
}

void
Store::Controller::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }

    if (!collapsed->locked()) {
        debugs(20, 3, "skipping (and may destroy) unlocked " << *collapsed);
        handleIdleEntry(*collapsed);
        return;
    }

    assert(collapsed->mem_obj);

    if (EBIT_TEST(collapsed->flags, ENTRY_ABORTED)) {
        debugs(20, 3, "skipping already aborted " << *collapsed);
        return;
    }

    debugs(20, 7, "syncing " << *collapsed);

    Transients::EntryStatus entryStatus;
    transients->status(*collapsed, entryStatus);
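
    // entryStatus reflects shared Transients state: whether the entry is
    // still collapsed, still has a writer, and/or is waiting to be freed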

    if (!entryStatus.collapsed) {
        debugs(20, 5, "removing collapsing requirement for " << *collapsed << " since remote writer probably got headers");
        collapsed->setCollapsingRequirement(false);
    }

    if (entryStatus.waitingToBeFreed) {
        debugs(20, 3, "will release " << *collapsed << " due to waitingToBeFreed");
        collapsed->release(true); // may already be marked
    }

    if (transients->isWriter(*collapsed))
        return; // readers can only change our waitingToBeFreed flag

    assert(transients->isReader(*collapsed));

    if (entryStatus.collapsed && !collapsed->hittingRequiresCollapsing()) {
        debugs(20, 3, "aborting " << *collapsed << " due to writer/reader collapsing state mismatch");
        collapsed->abort();
        return;
    }

    bool found = false;
    bool inSync = false;
    if (sharedMemStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "fully mem-loaded " << *collapsed);
    } else if (sharedMemStore && collapsed->hasMemStore()) {
        found = true;
        inSync = sharedMemStore->updateAnchored(*collapsed);
        // TODO: handle entries attached to both memory and disk
    } else if (swapDir && collapsed->hasDisk()) {
        found = true;
        inSync = swapDir->updateAnchored(*collapsed);
    } else {
        found = anchorToCache(*collapsed, inSync);
    }

    if (entryStatus.waitingToBeFreed && !found) {
        debugs(20, 3, "aborting unattached " << *collapsed <<
               " because it was marked for deletion before we could attach it");
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        collapsed->invokeHandlers();
        return;
    }

    if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
        return;
    }

    if (!entryStatus.hasWriter) {
        debugs(20, 3, "aborting abandoned-by-writer " << *collapsed);
        collapsed->abort();
        return;
    }

    // the entry is still not in one of the caches
    debugs(20, 7, "waiting " << *collapsed);
}

/// Called for Transients entries that are not yet anchored to a cache.
/// \returns false for not-yet-cached entries that we may attach later
/// \returns true for other entries after synchronizing them with their store
/// and setting inSync to reflect that synchronization outcome.
bool
Store::Controller::anchorToCache(StoreEntry &entry, bool &inSync)
{
    assert(entry.hasTransients());
    assert(transientsReader(entry));

    debugs(20, 7, "anchoring " << entry);

    Transients::EntryStatus entryStatus;
    transients->status(entry, entryStatus);

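    // check the shared memory cache first and fall back to the disks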
    bool found = false;
    if (sharedMemStore)
        found = sharedMemStore->anchorToCache(entry, inSync);
    if (!found && swapDir)
        found = swapDir->anchorToCache(entry, inSync);

    if (found) {
        if (inSync)
            debugs(20, 7, "anchored " << entry);
        else
            debugs(20, 5, "failed to anchor " << entry);
        return true;
    }

    if (entryStatus.waitingToBeFreed) {
        debugs(20, 5, "failed on marked unattached " << entry);
        inSync = false;
        return true;
    }

    if (!entryStatus.hasWriter) {
        debugs(20, 5, "failed on abandoned-by-writer " << entry);
        inSync = false;
        return true;
    }

    debugs(20, 7, "skipping not yet cached " << entry);
    return false;
}

bool
Store::Controller::SmpAware()
{
    return MemStore::Enabled() || Disks::SmpAware();
}

void
Store::Controller::checkTransients(const StoreEntry &e) const
{
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;
    assert(!transients || e.hasTransients());
}

namespace Store {
static RefCount<Controller> TheRoot;
}

Store::Controller&
Store::Root()
{
    assert(TheRoot);
    return *TheRoot;
}

void
Store::Init(Controller *root)
{
    TheRoot = root ? root : new Controller;
}

void
Store::FreeMemory()
{
    TheRoot = nullptr;
}