]>
Commit | Line | Data |
---|---|---|
2745fea5 | 1 | /* |
b8ae064d | 2 | * Copyright (C) 1996-2023 The Squid Software Foundation and contributors |
2745fea5 AR |
3 | * |
4 | * Squid software is distributed under GPLv2+ license and includes | |
5 | * contributions from numerous individuals and organizations. | |
6 | * Please see the COPYING and CONTRIBUTORS files for details. | |
7 | */ | |
8 | ||
9 | /* DEBUG: section 20 Store Controller */ | |
10 | ||
11 | #include "squid.h" | |
12 | #include "mem_node.h" | |
13 | #include "MemStore.h" | |
2745fea5 AR |
14 | #include "SquidConfig.h" |
15 | #include "SquidMath.h" | |
16 | #include "store/Controller.h" | |
17 | #include "store/Disks.h" | |
18 | #include "store/LocalSearch.h" | |
19 | #include "tools.h" | |
20 | #include "Transients.h" | |
21 | ||
22 | #if HAVE_SYS_WAIT_H | |
23 | #include <sys/wait.h> | |
24 | #endif | |
25 | ||
/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
// Counts cache_dirs still being rebuilt; while nonzero, clean-log writing
// is suppressed (see the hack note above).
int Store::Controller::store_dirs_rebuilding = 1;
34 | ||
/// Constructs the store coordinator with an always-present disk-store layer;
/// the optional memory cache and Transients layers are attached later by init().
Store::Controller::Controller() :
    disks(new Disks),
    sharedMemStore(nullptr), // set by init() when a shared memory cache is enabled
    localMemStore(false),    // set by init() when a non-shared memory cache is enabled
    transients(nullptr)      // set by init() when Transients are enabled
{
    // the singleton must be constructed before the global entry index exists
    assert(!store_table);
}
43 | ||
/// this destructor is never called because Controller singleton is immortal
Store::Controller::~Controller()
{
    // assert at runtime because we cannot `= delete` an overridden destructor
    assert(!"Controller is never destroyed");
}
50 | ||
/// Attaches the optional store layers: a memory cache (shared MemStore when
/// possible, otherwise a local one), the disk stores, and Transients.
void
Store::Controller::init()
{
    // memory caching happens in worker processes only
    if (IamWorkerProcess()) {
        if (MemStore::Enabled()) {
            sharedMemStore = new MemStore;
            sharedMemStore->init();
        } else if (Config.memMaxSize > 0) {
            // fall back to a non-shared, store_table-based memory cache
            localMemStore = true;
        }
    }

    disks->init();

    // Transients support collapsed forwarding; workers only
    if (Transients::Enabled() && IamWorkerProcess()) {
        transients = new Transients;
        transients->init();
    }
}
70 | ||
/// Creates on-disk cache structures for all configured disk stores.
void
Store::Controller::create()
{
    disks->create();

#if !_SQUID_WINDOWS_
    // NOTE(review): this non-blocking reap loop implies disks->create() may
    // fork child processes — confirm against Disks::create() implementation.
    pid_t pid;
    do {
        PidStatus status;
        pid = WaitForAnyPid(status, WNOHANG); // WNOHANG: never block here
    } while (pid > 0 || (pid < 0 && errno == EINTR));
#endif
}
84 | ||
85 | void | |
86 | Store::Controller::maintain() | |
87 | { | |
88 | static time_t last_warn_time = 0; | |
89 | ||
e305d771 | 90 | disks->maintain(); |
2745fea5 AR |
91 | |
92 | /* this should be emitted by the oversize dir, not globally */ | |
93 | ||
94 | if (Root().currentSize() > Store::Root().maxSize()) { | |
95 | if (squid_curtime - last_warn_time > 10) { | |
96 | debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: " | |
97 | << Store::Root().currentSize() / 1024.0 << " KB > " | |
98 | << (Store::Root().maxSize() >> 10) << " KB"); | |
99 | last_warn_time = squid_curtime; | |
100 | } | |
101 | } | |
2745fea5 AR |
102 | } |
103 | ||
/// Aggregates memory-cache, disk-store, and low-level object statistics into stats.
void
Store::Controller::getStats(StoreInfoStats &stats) const
{
    if (sharedMemStore)
        sharedMemStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        if (localMemStore) {
            // XXX: also count internal/in-transit objects
            stats.mem.count = hot_obj_count;
        } else {
            // XXX: count internal/in-transit objects instead
            // (both branches currently report hot_obj_count; only the intended
            // fix, described by the XXX comments, differs)
            stats.mem.count = hot_obj_count;
        }
    }

    disks->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}
129 | ||
/// Appends a human-readable store statistics report to the given entry.
void
Store::Controller::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    // per-layer details follow the summary above
    if (sharedMemStore)
        sharedMemStore->stat(output);

    disks->stat(output);
}
149 | ||
150 | /* if needed, this could be taught to cache the result */ | |
151 | uint64_t | |
152 | Store::Controller::maxSize() const | |
153 | { | |
154 | /* TODO: include memory cache ? */ | |
e305d771 | 155 | return disks->maxSize(); |
2745fea5 AR |
156 | } |
157 | ||
158 | uint64_t | |
159 | Store::Controller::minSize() const | |
160 | { | |
161 | /* TODO: include memory cache ? */ | |
e305d771 | 162 | return disks->minSize(); |
2745fea5 AR |
163 | } |
164 | ||
165 | uint64_t | |
166 | Store::Controller::currentSize() const | |
167 | { | |
168 | /* TODO: include memory cache ? */ | |
e305d771 | 169 | return disks->currentSize(); |
2745fea5 AR |
170 | } |
171 | ||
172 | uint64_t | |
173 | Store::Controller::currentCount() const | |
174 | { | |
175 | /* TODO: include memory cache ? */ | |
e305d771 | 176 | return disks->currentCount(); |
2745fea5 AR |
177 | } |
178 | ||
179 | int64_t | |
180 | Store::Controller::maxObjectSize() const | |
181 | { | |
182 | /* TODO: include memory cache ? */ | |
e305d771 | 183 | return disks->maxObjectSize(); |
2745fea5 AR |
184 | } |
185 | ||
/// (Re)computes store-wide limits derived from configuration: disk swap
/// watermarks, memory page limit, and the largest cacheable object size.
void
Store::Controller::configure()
{
    disks->configure();

    // swap watermarks as percentages of the total disk cache capacity
    store_swap_high = (long) (((float) maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    store_pages_max = Config.memMaxSize / sizeof(mem_node);

    // TODO: move this into a memory cache class when we have one
    // the overall cap is the larger of the memory-cache and disk-store caps
    const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
    const int64_t disksMax = disks->maxObjectSize();
    store_maxobjsize = std::max(disksMax, memMax);
}
202 | ||
2745fea5 AR |
203 | StoreSearch * |
204 | Store::Controller::search() | |
205 | { | |
206 | // this is the only kind of search we currently support | |
207 | return NewLocalSearch(); | |
208 | } | |
209 | ||
/// Flushes all store layers: the shared memory cache (if any) first,
/// then every disk store.
void
Store::Controller::sync(void)
{
    if (sharedMemStore)
        sharedMemStore->sync();
    disks->sync();
}
217 | ||
218 | /* | |
2f8abb64 | 219 | * handle callbacks all available fs'es |
2745fea5 AR |
220 | */ |
221 | int | |
222 | Store::Controller::callback() | |
223 | { | |
2745fea5 | 224 | /* mem cache callbacks ? */ |
e305d771 | 225 | return disks->callback(); |
2745fea5 AR |
226 | } |
227 | ||
/// update reference counters of the recently touched entry
void
Store::Controller::referenceBusy(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.hasDisk())
        disks->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        sharedMemStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    // keep the removal policy informed so its ordering stays accurate
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}
251 | ||
/// dereference()s an idle entry
/// \returns false if and only if the entry should be deleted
bool
Store::Controller::dereferenceIdle(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    // idle private entries cannot be reused
    if (EBIT_TEST(e.flags, KEY_PRIVATE))
        return false;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    // Notify the fs that we are not referencing this object any more. This
    // should be done even if we overwrite keepInStoreTable afterwards.

    if (e.hasDisk())
        keepInStoreTable = disks->dereference(e) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (sharedMemStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = sharedMemStore->dereference(e) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (localMemStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    if (e.hittingRequiresCollapsing()) {
        // If we were writing this now-locally-idle entry, then we did not
        // finish and should now destroy an incomplete entry. Otherwise, do not
        // leave this idle StoreEntry behind because handleIMSReply() lacks
        // freshness checks when hitting a collapsed revalidation entry.
        keepInStoreTable = false; // may overrule fs decisions made above
    }

    return keepInStoreTable;
}
296 | ||
4310f8b0 EB |
297 | bool |
298 | Store::Controller::markedForDeletion(const cache_key *key) const | |
299 | { | |
300 | // assuming a public key, checking Transients should cover all cases. | |
301 | return transients && transients->markedForDeletion(key); | |
302 | } | |
303 | ||
304 | bool | |
305 | Store::Controller::markedForDeletionAndAbandoned(const StoreEntry &e) const | |
306 | { | |
307 | // The opposite check order could miss a reader that has arrived after the | |
308 | // !readers() and before the markedForDeletion() check. | |
309 | return markedForDeletion(reinterpret_cast<const cache_key*>(e.key)) && | |
310 | transients && !transients->readers(e); | |
311 | } | |
312 | ||
313 | bool | |
314 | Store::Controller::hasReadableDiskEntry(const StoreEntry &e) const | |
315 | { | |
e305d771 | 316 | return disks->hasReadableEntry(e); |
4310f8b0 EB |
317 | } |
318 | ||
/// flags problematic entries before find() commits to finalizing/returning them
/// \throws TextException when the candidate must not be shared with the caller
void
Store::Controller::checkFoundCandidate(const StoreEntry &entry) const
{
    checkTransients(entry);

    // The "hittingRequiresCollapsing() has an active writer" checks below
    // protect callers from getting stuck and/or from using a stale revalidation
    // reply. However, these protections are not reliable because the writer may
    // disappear at any time and/or without a trace. Collapsing adds risks...
    if (entry.hittingRequiresCollapsing()) {
        if (entry.hasTransients()) {
            // Too late to check here because the writer may be gone by now, but
            // Transients do check when they setCollapsingRequirement().
        } else {
            // a local writer must hold a lock on its writable entry
            if (!(entry.locked() && entry.isAccepting()))
                throw TextException("no local writer", Here());
        }
    }
}
340 | ||
/// Finds and finalizes a usable entry for the given key, registering and
/// reference-counting it; releases unusable candidates and returns nil.
StoreEntry *
Store::Controller::find(const cache_key *key)
{
    if (const auto entry = peek(key)) {
        try {
            // an unset key means the peeked entry is not yet in store_table
            if (!entry->key)
                allowSharing(*entry, key);
            checkFoundCandidate(*entry);
            entry->touch();
            referenceBusy(*entry);
            return entry;
        } catch (const std::exception &ex) {
            debugs(20, 2, "failed with " << *entry << ": " << ex.what());
            entry->release();
            // fall through
        }
    }
    return nullptr;
}
360 | ||
/// indexes and adds SMP-tracking for an ephemeral peek() result
/// \throws TextException when the entry cannot be safely shared
void
Store::Controller::allowSharing(StoreEntry &entry, const cache_key *key)
{
    // anchorToCache() below and many find() callers expect a registered entry
    addReading(&entry, key);

    if (entry.hasTransients()) {
        // store hadWriter before computing `found`; \see Transients::get()
        const auto hadWriter = transients->hasWriter(entry);
        const auto found = anchorToCache(entry);
        if (!found) {
            // !found should imply hittingRequiresCollapsing() regardless of writer presence
            if (!entry.hittingRequiresCollapsing()) {
                debugs(20, DBG_IMPORTANT, "ERROR: Squid BUG: missing ENTRY_REQUIRES_COLLAPSING for " << entry);
                throw TextException("transients entry missing ENTRY_REQUIRES_COLLAPSING", Here());
            }

            if (!hadWriter) {
                // prevent others from falling into the same trap
                throw TextException("unattached transients entry missing writer", Here());
            }
        }
    }
}
386 | ||
/// Looks up an entry for HTCP/ICP callback handling via the raw store_table.
StoreEntry *
Store::Controller::findCallbackXXX(const cache_key *key)
{
    // We could check for mem_obj presence (and more), moving and merging some
    // of the duplicated neighborsUdpAck() and neighborsHtcpReply() code here,
    // but that would mean polluting Store with HTCP/ICP code. Instead, we
    // should encapsulate callback-related data in a protocol-neutral MemObject
    // member or use an HTCP/ICP-specific index rather than store_table.

    // cannot reuse peekAtLocal() because HTCP/ICP callbacks may use private keys
    return static_cast<StoreEntry*>(hash_lookup(store_table, key));
}
2745fea5 | 399 | |
/// \returns either an existing local reusable StoreEntry object or nil
/// To treat remotely marked entries specially,
/// callers ought to check markedForDeletion() first!
StoreEntry *
Store::Controller::peekAtLocal(const cache_key *key)
{
    if (StoreEntry *e = static_cast<StoreEntry*>(hash_lookup(store_table, key))) {
        // callers must only search for public entries
        assert(!EBIT_TEST(e->flags, KEY_PRIVATE));
        assert(e->publicKey());
        checkTransients(*e);

        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        return e;
    }
    return nullptr;
}
418 | ||
/// Searches all store layers for the key without finalizing the result;
/// a returned entry may be unregistered (see allowSharing()) — find() is
/// the caller-facing wrapper that finalizes it.
StoreEntry *
Store::Controller::peek(const cache_key *key)
{
    debugs(20, 3, storeKeyText(key));

    if (markedForDeletion(key)) {
        debugs(20, 3, "ignoring marked in-transit " << storeKeyText(key));
        return nullptr;
    }

    if (StoreEntry *e = peekAtLocal(key)) {
        debugs(20, 3, "got local in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            return e;
        }
    }

    if (sharedMemStore) {
        if (StoreEntry *e = sharedMemStore->get(key)) {
            debugs(20, 3, "got mem-cached entry: " << *e);
            return e;
        }
    }

    if (const auto e = disks->get(key)) {
        debugs(20, 3, "got disk-cached entry: " << *e);
        return e;
    }

    debugs(20, 4, "cannot locate " << storeKeyText(key));
    return nullptr;
}
457 | ||
4310f8b0 EB |
458 | bool |
459 | Store::Controller::transientsReader(const StoreEntry &e) const | |
460 | { | |
461 | return transients && e.hasTransients() && transients->isReader(e); | |
462 | } | |
463 | ||
464 | bool | |
465 | Store::Controller::transientsWriter(const StoreEntry &e) const | |
466 | { | |
467 | return transients && e.hasTransients() && transients->isWriter(e); | |
468 | } | |
469 | ||
5ca027f0 AR |
470 | int64_t |
471 | Store::Controller::accumulateMore(StoreEntry &entry) const | |
472 | { | |
e305d771 | 473 | return disks->accumulateMore(entry); |
5ca027f0 AR |
474 | // The memory cache should not influence for-swapout accumulation decision. |
475 | } | |
476 | ||
// Must be called from StoreEntry::release() or releaseRequest() because
// those methods currently manage local indexing of StoreEntry objects.
// TODO: Replace StoreEntry::release*() with Root().evictCached().
void
Store::Controller::evictCached(StoreEntry &e)
{
    debugs(20, 7, e);
    // evict from every layer that may know about this entry
    if (transients)
        transients->evictCached(e);
    memoryEvictCached(e);
    disks->evictCached(e);
}
489 | ||
/// Evicts any entry matching the key from every store layer, without
/// requiring a StoreEntry object to exist locally.
void
Store::Controller::evictIfFound(const cache_key *key)
{
    debugs(20, 7, storeKeyText(key));

    // a locally indexed entry handles eviction of all its layers itself
    if (StoreEntry *entry = peekAtLocal(key)) {
        debugs(20, 5, "marking local in-transit " << *entry);
        entry->release(true);
        return;
    }

    if (sharedMemStore)
        sharedMemStore->evictIfFound(key);

    disks->evictIfFound(key);

    if (transients)
        transients->evictIfFound(key);
}
509 | ||
510 | /// whether the memory cache is allowed to store that many additional pages | |
511 | bool | |
512 | Store::Controller::memoryCacheHasSpaceFor(const int pagesRequired) const | |
2745fea5 | 513 | { |
4310f8b0 EB |
514 | // XXX: We count mem_nodes but may free shared memory pages instead. |
515 | const auto fits = mem_node::InUseCount() + pagesRequired <= store_pages_max; | |
516 | debugs(20, 7, fits << ": " << mem_node::InUseCount() << '+' << pagesRequired << '?' << store_pages_max); | |
517 | return fits; | |
2745fea5 AR |
518 | } |
519 | ||
/// Tries to free enough memory-cache space for the given number of bytes by
/// abandoning idle memory-cached entries; rate-limited to one walk per second.
void
Store::Controller::freeMemorySpace(const int bytesRequired)
{
    // round the byte count up to whole memory pages
    const auto pagesRequired = (bytesRequired + SM_PAGE_SIZE-1) / SM_PAGE_SIZE;

    if (memoryCacheHasSpaceFor(pagesRequired))
        return;

    // XXX: When store_pages_max is smaller than pagesRequired, we should not
    // look for more space (but we do because we want to abandon idle entries?).

    // limit our performance impact to one walk per second
    static time_t lastWalk = 0;
    if (lastWalk == squid_curtime)
        return;
    lastWalk = squid_curtime;

    debugs(20, 2, "need " << pagesRequired << " pages");

    // let abandon()/handleIdleEntry() know about the impending memory shortage
    memoryPagesDebt_ = pagesRequired;

    // XXX: SMP-unaware: Walkers should iterate memory cache, not store_table.
    // XXX: Limit iterations by time, not arbitrary count.
    const auto walker = mem_policy->PurgeInit(mem_policy, 100000);
    int removed = 0;
    while (const auto entry = walker->Next(walker)) {
        // Abandoned memory cache entries are purged during memory shortage.
        entry->abandon(__func__); // may delete entry
        ++removed;

        // stop as soon as the requested space becomes available
        if (memoryCacheHasSpaceFor(pagesRequired))
            break;
    }
    // TODO: Move to RemovalPolicyWalker::Done() that has more/better details.
    debugs(20, 3, "removed " << removed << " out of " << hot_obj_count << " memory-cached entries");
    walker->Done(walker);
    memoryPagesDebt_ = 0;
}
559 | ||
560 | // move this into [non-shared] memory cache class when we have one | |
561 | /// whether e should be kept in local RAM for possible future caching | |
562 | bool | |
563 | Store::Controller::keepForLocalMemoryCache(StoreEntry &e) const | |
564 | { | |
565 | if (!e.memoryCachable()) | |
566 | return false; | |
567 | ||
568 | // does the current and expected size obey memory caching limits? | |
569 | assert(e.mem_obj); | |
570 | const int64_t loadedSize = e.mem_obj->endOffset(); | |
571 | const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0 | |
572 | const int64_t ramSize = max(loadedSize, expectedSize); | |
573 | const int64_t ramLimit = min( | |
574 | static_cast<int64_t>(Config.memMaxSize), | |
575 | static_cast<int64_t>(Config.Store.maxInMemObjSize)); | |
576 | return ramSize <= ramLimit; | |
577 | } | |
578 | ||
/// Feeds fresh entry content to the memory cache (if any) and trims the
/// in-transit buffer when the content will not be kept in local memory.
void
Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (sharedMemStore)
        sharedMemStore->write(e); // leave keepInLocalMemory false
    else if (localMemStore)
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}
593 | ||
/// removes the entry from the memory cache
/// XXX: Dangerous side effect: Unlocked entries lose their mem_obj.
void
Store::Controller::memoryEvictCached(StoreEntry &e)
{
    // TODO: Untangle memory caching from mem_obj.
    if (sharedMemStore)
        sharedMemStore->evictCached(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        if (!e.locked())
            e.destroyMemObject(); // see the XXX side-effect warning above
}
606 | ||
607 | void | |
608 | Store::Controller::memoryDisconnect(StoreEntry &e) | |
609 | { | |
daed75a9 EB |
610 | if (sharedMemStore) |
611 | sharedMemStore->disconnect(e); | |
2745fea5 AR |
612 | // else nothing to do for non-shared memory cache |
613 | } | |
614 | ||
615 | void | |
24c93780 | 616 | Store::Controller::noteStoppedSharedWriting(StoreEntry &e) |
2745fea5 | 617 | { |
24c93780 | 618 | if (transients && e.hasTransients()) // paranoid: the caller should check |
4310f8b0 | 619 | transients->completeWriting(e); |
2745fea5 AR |
620 | } |
621 | ||
622 | int | |
623 | Store::Controller::transientReaders(const StoreEntry &e) const | |
624 | { | |
4310f8b0 | 625 | return (transients && e.hasTransients()) ? |
2745fea5 AR |
626 | transients->readers(e) : 0; |
627 | } | |
628 | ||
629 | void | |
4310f8b0 | 630 | Store::Controller::transientsDisconnect(StoreEntry &e) |
2745fea5 AR |
631 | { |
632 | if (transients) | |
4310f8b0 | 633 | transients->disconnect(e); |
2745fea5 AR |
634 | } |
635 | ||
/// Decides the fate of an entry that just became idle (unlocked): keep it in
/// local memory, keep it for its on-disk data, or destroy it.
void
Store::Controller::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (sharedMemStore) {
        // leave keepInLocalMemory false; sharedMemStore maintains its own cache
    } else if (localMemStore) {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            memoryCacheHasSpaceFor(memoryPagesDebt_);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereferenceIdle(e, keepInLocalMemory)) {
        debugs(20, 5, "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, "keepInLocalMemory: " << keepInLocalMemory);

    // formerly known as "WARNING: found KEY_PRIVATE"
    assert(!EBIT_TEST(e.flags, KEY_PRIVATE));

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
        return;
    }

    // We know the in-memory data will be gone. Get rid of the entire entry if
    // it has nothing worth preserving on disk either.
    if (!e.swappedOut()) {
        e.release(); // deletes e
        return;
    }

    memoryEvictCached(e); // may already be gone
    // and keep the entry in store_table for its on-disk data
}
684 | ||
/// Applies a 304 (Not Modified) reply (e304) to the previously cached entry
/// (old), propagating updated headers to the memory and disk caches.
/// \returns false only when the update attempt failed with an error
bool
Store::Controller::updateOnNotModified(StoreEntry *old, StoreEntry &e304)
{
    Must(old);
    Must(old->mem_obj);
    Must(e304.mem_obj);

    // updateOnNotModified() may be called many times for the same old entry.
    // e304.mem_obj->appliedUpdates value distinguishes two cases:
    //   false: Independent store clients revalidating the same old StoreEntry.
    //          Each such update uses its own e304. The old StoreEntry
    //          accumulates such independent updates.
    //   true: Store clients feeding off the same 304 response. Each such update
    //         uses the same e304. For timestamps correctness and performance
    //         sake, it is best to detect and skip such repeated update calls.
    if (e304.mem_obj->appliedUpdates) {
        debugs(20, 5, "ignored repeated update of " << *old << " with " << e304);
        return true;
    }
    e304.mem_obj->appliedUpdates = true;

    try {
        if (!old->updateOnNotModified(e304)) {
            debugs(20, 5, "updated nothing in " << *old << " with " << e304);
            return true;
        }
    } catch (...) {
        debugs(20, DBG_IMPORTANT, "ERROR: Failed to update a cached response: " << CurrentException);
        return false;
    }

    // propagate the header update to caches that keep their own copies
    if (sharedMemStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
        sharedMemStore->updateHeaders(old);

    if (old->swap_dirn > -1)
        disks->updateHeaders(old);

    return true;
}
724 | ||
/// Marks the entry as a collapsed-forwarding target and makes it public.
/// \returns whether the entry may now be collapsed onto by other requests
bool
Store::Controller::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                   const HttpRequestMethod &)
{
    // revalidation requests use a distinct key scope
    const KeyScope keyScope = reqFlags.refresh ? ksRevalidation : ksDefault;
    // set the flag now so that it gets copied into the Transients entry
    e->setCollapsingRequirement(true);
    if (e->makePublic(keyScope)) { // this is needed for both local and SMP collapsing
        debugs(20, 3, "may " << (transients && e->hasTransients() ?
                                 "SMP-" : "locally-") << "collapse " << *e);
        assert(e->hittingRequiresCollapsing());
        return true;
    }
    // paranoid cleanup; the flag is meaningless for private entries
    e->setCollapsingRequirement(false);
    return false;
}
742 | ||
/// Registers the entry in store_table as a collapsed-forwarding reader,
/// starting SMP I/O monitoring (when Transients are enabled) first.
void
Store::Controller::addReading(StoreEntry *e, const cache_key *key)
{
    if (transients)
        transients->monitorIo(e, key, Store::ioReading);
    e->hashInsert(key);
}
750 | ||
/// Starts SMP I/O monitoring of the entry as a collapsed-forwarding writer
/// (when Transients are enabled); special entries are exempt.
void
Store::Controller::addWriting(StoreEntry *e, const cache_key *key)
{
    assert(e);
    if (EBIT_TEST(e->flags, ENTRY_SPECIAL))
        return; // constant memory-resident entries do not need transients

    if (transients)
        transients->monitorIo(e, key, Store::ioWriting);
    // else: non-SMP configurations do not need transients
}
762 | ||
/// Brings the local collapsed-forwarding entry identified by its transients
/// index in sync with the shared caches, aborting or releasing it when the
/// writer has abandoned or marked it. NOTE(review): called on SMP broadcast
/// notifications — confirm against Transients callers.
void
Store::Controller::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }

    // unlocked entries have no waiting consumers; let idle handling
    // dispose of the entry instead of syncing it
    if (!collapsed->locked()) {
        debugs(20, 3, "skipping (and may destroy) unlocked " << *collapsed);
        handleIdleEntry(*collapsed);
        return;
    }

    assert(collapsed->mem_obj);

    if (EBIT_TEST(collapsed->flags, ENTRY_ABORTED)) {
        debugs(20, 3, "skipping already aborted " << *collapsed);
        return;
    }

    debugs(20, 7, "syncing " << *collapsed);

    // snapshot the shared transients state once; later checks reuse it
    Transients::EntryStatus entryStatus;
    transients->status(*collapsed, entryStatus);

    if (entryStatus.waitingToBeFreed) {
        debugs(20, 3, "will release " << *collapsed << " due to waitingToBeFreed");
        collapsed->release(true); // may already be marked
    }

    if (transients->isWriter(*collapsed))
        return; // readers can only change our waitingToBeFreed flag

    assert(transients->isReader(*collapsed));

    // found: the entry is attached to some cache (memory or disk)
    // inSync: the local entry metadata matches that cache's copy
    bool found = false;
    bool inSync = false;
    if (sharedMemStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "already handled by memory store: " << *collapsed);
    } else if (sharedMemStore && collapsed->hasMemStore()) {
        found = true;
        inSync = sharedMemStore->updateAnchored(*collapsed);
        // TODO: handle entries attached to both memory and disk
    } else if (collapsed->hasDisk()) {
        found = true;
        inSync = disks->updateAnchored(*collapsed);
    } else {
        // not attached yet; try to anchor to whichever cache has it now
        try {
            found = anchorToCache(*collapsed);
            inSync = found;
        } catch (...) {
            // TODO: Write an exception handler for the entire method.
            debugs(20, 3, "anchorToCache() failed for " << *collapsed << ": " << CurrentException);
            collapsed->abort();
            return;
        }
    }

    // a deletion mark seen before attachment means we can never attach
    if (entryStatus.waitingToBeFreed && !found) {
        debugs(20, 3, "aborting unattached " << *collapsed <<
               " because it was marked for deletion before we could attach it");
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        assert(found);
        collapsed->setCollapsingRequirement(false);
        collapsed->invokeHandlers(); // wake consumers waiting for this update
        return;
    }

    if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
        return;
    }

    // not cached and nobody is writing it, so it can never arrive
    if (!entryStatus.hasWriter) {
        debugs(20, 3, "aborting abandoned-by-writer " << *collapsed);
        collapsed->abort();
        return;
    }

    // the entry is still not in one of the caches
    debugs(20, 7, "waiting " << *collapsed);
    collapsed->setCollapsingRequirement(true);
}
858 | ||
/// If possible and has not been done, associates the entry with its store(s).
/// \returns false for not-yet-cached entries that we may attach later
/// \returns true for other entries after synchronizing them with their store
/// \throws TextException when the entry can never be anchored (marked for
///         deletion before attachment, or abandoned by its writer)
bool
Store::Controller::anchorToCache(StoreEntry &entry)
{
    // only collapsed-forwarding readers with a transients slot reach here
    assert(entry.hasTransients());
    assert(transientsReader(entry));

    // TODO: Attach entries to both memory and disk

    // TODO: Reduce code duplication with syncCollapsed()
    if (sharedMemStore && entry.mem().memCache.io == MemObject::ioDone) {
        debugs(20, 5, "already handled by memory store: " << entry);
        return true;
    } else if (sharedMemStore && entry.hasMemStore()) {
        debugs(20, 5, "already anchored to memory store: " << entry);
        return true;
    } else if (entry.hasDisk()) {
        debugs(20, 5, "already anchored to disk: " << entry);
        return true;
    }

    debugs(20, 7, "anchoring " << entry);

    // snapshot shared state before probing the caches
    Transients::EntryStatus entryStatus;
    transients->status(entry, entryStatus);

    // try the shared memory cache first, then the disk caches
    bool found = false;
    if (sharedMemStore)
        found = sharedMemStore->anchorToCache(entry);
    if (!found)
        found = disks->anchorToCache(entry);

    if (found) {
        debugs(20, 7, "anchored " << entry);
        entry.setCollapsingRequirement(false);
        return true;
    }

    // not in any cache; decide whether waiting could still succeed

    if (entryStatus.waitingToBeFreed)
        throw TextException("will never be able to anchor to an already marked entry", Here());

    if (!entryStatus.hasWriter)
        throw TextException("will never be able to anchor to an abandoned-by-writer entry", Here());

    debugs(20, 7, "skipping not yet cached " << entry);
    entry.setCollapsingRequirement(true);
    return false;
}
909 | ||
1a210de4 | 910 | bool |
daed75a9 | 911 | Store::Controller::SmpAware() |
1a210de4 | 912 | { |
daed75a9 | 913 | return MemStore::Enabled() || Disks::SmpAware(); |
1a210de4 EB |
914 | } |
915 | ||
4310f8b0 EB |
916 | void |
917 | Store::Controller::checkTransients(const StoreEntry &e) const | |
918 | { | |
919 | if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) | |
920 | return; | |
921 | assert(!transients || e.hasTransients()); | |
922 | } | |
923 | ||
2745fea5 AR |
924 | Store::Controller& |
925 | Store::Root() | |
926 | { | |
1f50e07b AR |
927 | static const auto root = new Controller(); |
928 | return *root; | |
2745fea5 | 929 | } |
7d84d4ca | 930 |