/*
 * Copyright (C) 1996-2017 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Store Controller */

#include "squid.h"
#include "mem_node.h"
#include "MemStore.h"
#include "profiler/Profiler.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "store/Controller.h"
#include "store/Disks.h"
#include "store/LocalSearch.h"
#include "tools.h"
#include "Transients.h"

#if HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

/*
 * store_dirs_rebuilding is initialized to _1_ as a hack so that
 * storeDirWriteCleanLogs() doesn't try to do anything unless _all_
 * cache_dirs have been read. For example, without this hack, Squid
 * will try to write clean log files if -kparse fails (because it
 * calls fatal()).
 */
int Store::Controller::store_dirs_rebuilding = 1;

Store::Controller::Controller() :
    swapDir(new Disks),
    memStore(NULL),
    transients(NULL)
{
    assert(!store_table);
}

Store::Controller::~Controller()
{
    delete memStore;
    delete transients;
    delete swapDir;

    if (store_table) {
        hashFreeItems(store_table, destroyStoreEntry);
        hashFreeMemory(store_table);
        store_table = nullptr;
    }
}

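/// initializes the memory cache, the cache_dirs, and (when SMP collapsed
/// forwarding is enabled) the shared Transients table for this process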
void
Store::Controller::init()
{
    if (Config.memShared && IamWorkerProcess()) {
        memStore = new MemStore;
        memStore->init();
    }

    swapDir->init();

    if (UsingSmp() && IamWorkerProcess() && Config.onoff.collapsed_forwarding &&
            smpAware()) {
        transients = new Transients;
        transients->init();
    }
}

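/// creates the on-disk cache_dir structures and reaps any child processes
/// spawned while doing so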
void
Store::Controller::create()
{
    swapDir->create();

#if !_SQUID_WINDOWS_
    pid_t pid;
    do {
        PidStatus status;
        pid = WaitForAnyPid(status, WNOHANG);
    } while (pid > 0 || (pid < 0 && errno == EINTR));
#endif
}

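/// periodically purges cache_dir entries and warns when disk usage
/// exceeds the configured limit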
void
Store::Controller::maintain()
{
    static time_t last_warn_time = 0;

    PROF_start(storeMaintainSwapSpace);
    swapDir->maintain();

    /* this should be emitted by the oversize dir, not globally */

    if (Root().currentSize() > Store::Root().maxSize()) {
        if (squid_curtime - last_warn_time > 10) {
            debugs(20, DBG_CRITICAL, "WARNING: Disk space over limit: "
                   << Store::Root().currentSize() / 1024.0 << " KB > "
                   << (Store::Root().maxSize() >> 10) << " KB");
            last_warn_time = squid_curtime;
        }
    }

    PROF_stop(storeMaintainSwapSpace);
}

void
Store::Controller::getStats(StoreInfoStats &stats) const
{
    if (memStore)
        memStore->getStats(stats);
    else {
        // move this code to a non-shared memory cache class when we have it
        stats.mem.shared = false;
        stats.mem.capacity = Config.memMaxSize;
        stats.mem.size = mem_node::StoreMemSize();
        stats.mem.count = hot_obj_count;
    }

    swapDir->getStats(stats);

    // low-level info not specific to memory or disk cache
    stats.store_entry_count = StoreEntry::inUseCount();
    stats.mem_object_count = MemObject::inUseCount();
}

void
Store::Controller::stat(StoreEntry &output) const
{
    storeAppendPrintf(&output, "Store Directory Statistics:\n");
    storeAppendPrintf(&output, "Store Entries : %lu\n",
                      (unsigned long int)StoreEntry::inUseCount());
    storeAppendPrintf(&output, "Maximum Swap Size : %" PRIu64 " KB\n",
                      maxSize() >> 10);
    storeAppendPrintf(&output, "Current Store Swap Size: %.2f KB\n",
                      currentSize() / 1024.0);
    storeAppendPrintf(&output, "Current Capacity : %.2f%% used, %.2f%% free\n",
                      Math::doublePercent(currentSize(), maxSize()),
                      Math::doublePercent((maxSize() - currentSize()), maxSize()));

    if (memStore)
        memStore->stat(output);

    /* now the swapDir */
    swapDir->stat(output);
}

/* if needed, this could be taught to cache the result */
uint64_t
Store::Controller::maxSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxSize();
}

uint64_t
Store::Controller::minSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->minSize();
}

uint64_t
Store::Controller::currentSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->currentSize();
}

uint64_t
Store::Controller::currentCount() const
{
    /* TODO: include memory cache ? */
    return swapDir->currentCount();
}

int64_t
Store::Controller::maxObjectSize() const
{
    /* TODO: include memory cache ? */
    return swapDir->maxObjectSize();
}

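/// recomputes the swap high/low watermarks, store_pages_max, and the
/// largest cachable object size (store_maxobjsize)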
void
Store::Controller::updateLimits()
{
    swapDir->updateLimits();

    store_swap_high = (long) (((float) maxSize() *
                               (float) Config.Swap.highWaterMark) / (float) 100);
    store_swap_low = (long) (((float) maxSize() *
                              (float) Config.Swap.lowWaterMark) / (float) 100);
    store_pages_max = Config.memMaxSize / sizeof(mem_node);

    // TODO: move this into a memory cache class when we have one
    const int64_t memMax = static_cast<int64_t>(min(Config.Store.maxInMemObjSize, Config.memMaxSize));
    const int64_t disksMax = swapDir ? swapDir->maxObjectSize() : 0;
    store_maxobjsize = std::max(disksMax, memMax);
}

StoreSearch *
Store::Controller::search()
{
    // this is the only kind of search we currently support
    return NewLocalSearch();
}

void
Store::Controller::sync(void)
{
    if (memStore)
        memStore->sync();
    swapDir->sync();
}

/*
 * handle callbacks for all available filesystems
 */
int
Store::Controller::callback()
{
    /* This will likely double count. That's OK. */
    PROF_start(storeDirCallback);

    /* mem cache callbacks ? */
    int result = swapDir->callback();

    PROF_stop(storeDirCallback);

    return result;
}

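/// notifies the relevant cache(s) and the memory replacement policy that
/// a busy (locked) entry has been referenced again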
void
Store::Controller::referenceBusy(StoreEntry &e)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return;

    /* Notify the fs that we're referencing this object again */

    if (e.swap_dirn > -1)
        swapDir->reference(e);

    // Notify the memory cache that we're referencing this object again
    if (memStore && e.mem_status == IN_MEMORY)
        memStore->reference(e);

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Referenced)
            mem_policy->Referenced(mem_policy, &e, &e.mem_obj->repl);
    }
}

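/// notifies the relevant cache(s) that the entry is no longer referenced;
/// returns whether the entry should be kept in the global store_table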
bool
Store::Controller::dereferenceIdle(StoreEntry &e, bool wantsLocalMemory)
{
    // special entries do not belong to any specific Store, but are IN_MEMORY
    if (EBIT_TEST(e.flags, ENTRY_SPECIAL))
        return true;

    bool keepInStoreTable = false; // keep only if somebody needs it there

    /* Notify the fs that we're not referencing this object any more */

    if (e.swap_filen > -1)
        keepInStoreTable = swapDir->dereference(e) || keepInStoreTable;

    // Notify the memory cache that we're not referencing this object any more
    if (memStore && e.mem_status == IN_MEMORY)
        keepInStoreTable = memStore->dereference(e) || keepInStoreTable;

    // TODO: move this code to a non-shared memory cache class when we have it
    if (e.mem_obj) {
        if (mem_policy->Dereferenced)
            mem_policy->Dereferenced(mem_policy, &e, &e.mem_obj->repl);
        // non-shared memory cache relies on store_table
        if (!memStore)
            keepInStoreTable = wantsLocalMemory || keepInStoreTable;
    }

    return keepInStoreTable;
}

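/// returns a matching in-transit or cached entry (if any), touching it and
/// telling the caches that it has been referenced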
StoreEntry *
Store::Controller::get(const cache_key *key)
{
    if (StoreEntry *e = find(key)) {
        // this is not very precise: some get()s are not initiated by clients
        e->touch();
        referenceBusy(*e);
        return e;
    }
    return NULL;
}

/// Internal method implementing the guts of the Store::get() API:
/// returns an in-transit or cached object with a given key, if any.
StoreEntry *
Store::Controller::find(const cache_key *key)
{
    debugs(20, 3, storeKeyText(key));

    if (StoreEntry *e = static_cast<StoreEntry*>(hash_lookup(store_table, key))) {
        // TODO: ignore and maybe handleIdleEntry() unlocked intransit entries
        // because their backing store slot may be gone already.
        debugs(20, 3, HERE << "got in-transit entry: " << *e);
        return e;
    }

    // Must search transients before caches because we must sync those we find.
    if (transients) {
        if (StoreEntry *e = transients->get(key)) {
            debugs(20, 3, "got shared in-transit entry: " << *e);
            bool inSync = false;
            const bool found = anchorCollapsed(*e, inSync);
            if (!found || inSync)
                return e;
            assert(!e->locked()); // ensure release will destroyStoreEntry()
            e->release(); // do not let others into the same trap
            return NULL;
        }
    }

    if (memStore) {
        if (StoreEntry *e = memStore->get(key)) {
            debugs(20, 3, HERE << "got mem-cached entry: " << *e);
            return e;
        }
    }

    if (swapDir) {
        if (StoreEntry *e = swapDir->get(key)) {
            debugs(20, 3, "got disk-cached entry: " << *e);
            return e;
        }
    }

    debugs(20, 4, "cannot locate " << storeKeyText(key));
    return nullptr;
}

int64_t
Store::Controller::accumulateMore(StoreEntry &entry) const
{
    return swapDir ? swapDir->accumulateMore(entry) : 0;
    // The memory cache should not influence for-swapout accumulation decision.
}

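/// marks the entry for deletion in every cache that may have a copy
/// (transients, memory cache, and disk)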
void
Store::Controller::markForUnlink(StoreEntry &e)
{
    if (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0)
        transients->markForUnlink(e);
    if (memStore && e.mem_obj && e.mem_obj->memCache.index >= 0)
        memStore->markForUnlink(e);
    if (swapDir && e.swap_filen >= 0)
        swapDir->markForUnlink(e);
}

void
Store::Controller::unlink(StoreEntry &e)
{
    memoryUnlink(e);
    if (swapDir && e.swap_filen >= 0)
        swapDir->unlink(e);
}

// move this into [non-shared] memory cache class when we have one
/// whether e should be kept in local RAM for possible future caching
bool
Store::Controller::keepForLocalMemoryCache(StoreEntry &e) const
{
    if (!e.memoryCachable())
        return false;

    // does the current and expected size obey memory caching limits?
    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);
    const int64_t ramLimit = min(
                                 static_cast<int64_t>(Config.memMaxSize),
                                 static_cast<int64_t>(Config.Store.maxInMemObjSize));
    return ramSize <= ramLimit;
}

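/// hands fresh entry content to the shared memory cache or, with a local
/// memory cache, trims the entry when it is not worth keeping in RAM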
void
Store::Controller::memoryOut(StoreEntry &e, const bool preserveSwappable)
{
    bool keepInLocalMemory = false;
    if (memStore)
        memStore->write(e); // leave keepInLocalMemory false
    else
        keepInLocalMemory = keepForLocalMemoryCache(e);

    debugs(20, 7, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    if (!keepInLocalMemory)
        e.trimMemory(preserveSwappable);
}

void
Store::Controller::memoryUnlink(StoreEntry &e)
{
    if (memStore)
        memStore->unlink(e);
    else // TODO: move into [non-shared] memory cache class when we have one
        e.destroyMemObject();
}

void
Store::Controller::memoryDisconnect(StoreEntry &e)
{
    if (memStore)
        memStore->disconnect(e);
    // else nothing to do for non-shared memory cache
}

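/// marks the shared in-transit entry (if any) as abandoned by its local writer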
void
Store::Controller::transientsAbandon(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->abandon(e);
    }
}

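/// tells the shared Transients table that the local writer has finished
/// writing this entry (no-op for entries that are not shared in transit)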
void
Store::Controller::transientsCompleteWriting(StoreEntry &e)
{
    if (transients) {
        assert(e.mem_obj);
        if (e.mem_obj->xitTable.index >= 0)
            transients->completeWriting(e);
    }
}

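/// the number of shared in-transit entry readers; zero for entries that are
/// not shared in transit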
int
Store::Controller::transientReaders(const StoreEntry &e) const
{
    return (transients && e.mem_obj && e.mem_obj->xitTable.index >= 0) ?
           transients->readers(e) : 0;
}

void
Store::Controller::transientsDisconnect(MemObject &mem_obj)
{
    if (transients)
        transients->disconnect(mem_obj);
}

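/// handles a newly idle (unlocked) entry: either keeps it in the local
/// memory cache and store_table or destroys/purges it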
void
Store::Controller::handleIdleEntry(StoreEntry &e)
{
    bool keepInLocalMemory = false;

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        // Icons (and cache digests?) should stay in store_table until we
        // have a dedicated storage for them (that would not purge them).
        // They are not managed [well] by any specific Store handled below.
        keepInLocalMemory = true;
    } else if (memStore) {
        // leave keepInLocalMemory false; memStore maintains its own cache
    } else {
        keepInLocalMemory = keepForLocalMemoryCache(e) && // in good shape and
                            // the local memory cache is not overflowing
                            (mem_node::InUseCount() <= store_pages_max);
    }

    // An idle, unlocked entry that only belongs to a SwapDir which controls
    // its own index, should not stay in the global store_table.
    if (!dereferenceIdle(e, keepInLocalMemory)) {
        debugs(20, 5, HERE << "destroying unlocked entry: " << &e << ' ' << e);
        destroyStoreEntry(static_cast<hash_link*>(&e));
        return;
    }

    debugs(20, 5, HERE << "keepInLocalMemory: " << keepInLocalMemory);

    // TODO: move this into [non-shared] memory cache class when we have one
    if (keepInLocalMemory) {
        e.setMemStatus(IN_MEMORY);
        e.mem_obj->unlinkRequest();
    } else {
        e.purgeMem(); // may free e
    }
}

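/// updates the old entry (and its in-memory and on-disk images) using
/// headers from the more recent `newer` entry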
void
Store::Controller::updateOnNotModified(StoreEntry *old, const StoreEntry &newer)
{
    /* update the old entry object */
    Must(old);
    HttpReply *oldReply = const_cast<HttpReply*>(old->getReply());
    Must(oldReply);

    const bool modified = oldReply->updateOnNotModified(newer.getReply());
    if (!old->timestampsSet() && !modified)
        return;

    /* update stored image of the old entry */

    if (memStore && old->mem_status == IN_MEMORY && !EBIT_TEST(old->flags, ENTRY_SPECIAL))
        memStore->updateHeaders(old);

    if (old->swap_dirn > -1)
        swapDir->updateHeaders(old);
}

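/// makes the entry public and, when Transients is available, starts sharing
/// it so that other requests may collapse onto it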
void
Store::Controller::allowCollapsing(StoreEntry *e, const RequestFlags &reqFlags,
                                   const HttpRequestMethod &reqMethod)
{
    const KeyScope keyScope = reqFlags.refresh ? ksRevalidation : ksDefault;
    e->makePublic(keyScope); // this is needed for both local and SMP collapsing
    if (transients)
        transients->startWriting(e, reqFlags, reqMethod);
    debugs(20, 3, "may " << (transients && e->mem_obj->xitTable.index >= 0 ?
                             "SMP-" : "locally-") << "collapse " << *e);
}

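/// syncs a locally collapsed entry with the latest state of its remote
/// writer, aborting the entry when syncing is impossible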
void
Store::Controller::syncCollapsed(const sfileno xitIndex)
{
    assert(transients);

    StoreEntry *collapsed = transients->findCollapsed(xitIndex);
    if (!collapsed) { // the entry is no longer locally active, ignore update
        debugs(20, 7, "not SMP-syncing not-transient " << xitIndex);
        return;
    }
    assert(collapsed->mem_obj);
    assert(collapsed->mem_obj->smpCollapsed);

    debugs(20, 7, "syncing " << *collapsed);

    bool abandoned = transients->abandoned(*collapsed);
    bool found = false;
    bool inSync = false;
    if (memStore && collapsed->mem_obj->memCache.io == MemObject::ioDone) {
        found = true;
        inSync = true;
        debugs(20, 7, "fully mem-loaded " << *collapsed);
    } else if (memStore && collapsed->mem_obj->memCache.index >= 0) {
        found = true;
        inSync = memStore->updateCollapsed(*collapsed);
    } else if (swapDir && collapsed->swap_filen >= 0) {
        found = true;
        inSync = swapDir->updateCollapsed(*collapsed);
    } else {
        found = anchorCollapsed(*collapsed, inSync);
    }

    if (abandoned && collapsed->store_status == STORE_PENDING) {
        debugs(20, 3, "aborting abandoned but STORE_PENDING " << *collapsed);
        collapsed->abort();
        return;
    }

    if (inSync) {
        debugs(20, 5, "synced " << *collapsed);
        collapsed->invokeHandlers();
    } else if (found) { // unrecoverable problem syncing this entry
        debugs(20, 3, "aborting unsyncable " << *collapsed);
        collapsed->abort();
    } else { // the entry is still not in one of the caches
        debugs(20, 7, "waiting " << *collapsed);
    }
}

/// Called for in-transit entries that are not yet anchored to a cache.
/// For cached entries, return true after synchronizing them with their cache
/// (making inSync true on success). For not-yet-cached entries, return false.
bool
Store::Controller::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    // this method is designed to work with collapsed transients only
    assert(collapsed.mem_obj);
    assert(collapsed.mem_obj->xitTable.index >= 0);
    assert(collapsed.mem_obj->smpCollapsed);

    debugs(20, 7, "anchoring " << collapsed);

    bool found = false;
    if (memStore)
        found = memStore->anchorCollapsed(collapsed, inSync);
    if (!found && swapDir)
        found = swapDir->anchorCollapsed(collapsed, inSync);

    if (found) {
        if (inSync)
            debugs(20, 7, "anchored " << collapsed);
        else
            debugs(20, 5, "failed to anchor " << collapsed);
    } else {
        debugs(20, 7, "skipping not yet cached " << collapsed);
    }

    return found;
}

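/// whether any of the configured cache stores (shared memory cache or
/// cache_dirs) is SMP-aware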
bool
Store::Controller::smpAware() const
{
    return memStore || (swapDir && swapDir->smpAware());
}

namespace Store {
static RefCount<Controller> TheRoot;
}

Store::Controller&
Store::Root()
{
    assert(TheRoot);
    return *TheRoot;
}

void
Store::Init(Controller *root)
{
    TheRoot = root ? root : new Controller;
}

void
Store::FreeMemory()
{
    TheRoot = nullptr;
}