/*
 * DEBUG: section 20    Memory Cache
 *
 */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const char *MapLabel = "cache_mem_map";
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;
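
// Note on the encoding (derived from the code below): noteFreeMapSlice()
// stores slice N as a PageId with pool = SpacePoolId and number = N + 1,
// and reserveSapForWriting() undoes the shift via "slot.number - 1"; the
// +1 keeps slice 0 from looking like an unset (zero) page number.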

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));

            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(limit)) {
                const int usedSlots = limit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / limit));
            }

            if (limit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}
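
// Illustrative output given the formats above (made-up numbers: 256 MB
// cache_mem, 8192 slots, 32 cached entries of one 32 KB page each):
//   Maximum Size: 262144 KB
//   Current Size: 1024.00 KB 0.39%
//   Maximum entries:      8192
//   Current entries: 32 0.39%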

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}
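
// For example (hypothetical config): with "maximum_object_size_in_memory
// 512 KB" and "cache_mem 256 MB", the effective per-object cap is 512 KB;
// a cache_mem smaller than maximum_object_size_in_memory lowers the cap.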

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    const Ipc::StoreMapAnchor::Basics &basics = slot->basics;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();
    e->lock_count = 0;

    e->swap_file_sz = basics.swap_file_sz;
    e->lastref = basics.lastref;
    e->timestamp = basics.timestamp;
    e->expires = basics.expires;
    e->lastmod = basics.lastmod;
    e->refcount = basics.refcount;
    e->flags = basics.flags;

    e->store_status = STORE_OK;
    e->mem_status = IN_MEMORY; // setMemStatus(IN_MEMORY) requires mem_obj
    //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
    e->ping_status = PING_NONE;

    EBIT_SET(e->flags, ENTRY_CACHABLE);
    EBIT_CLR(e->flags, RELEASE_REQUEST);
    EBIT_CLR(e->flags, KEY_PRIVATE);
    EBIT_SET(e->flags, ENTRY_VALIDATED);

    const bool copied = copyFromShm(*e, index, *slot);

    // we copied everything we could to local memory; no more need to lock
    map->closeForReading(index);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    // At least one caller calls createMemObject() if there is not one, so
    // we hide the true object until that happens (to avoid leaking TBD URLs).
    e.createMemObject("TBD", "TBD");

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start;
    int64_t offset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        const MemStoreMap::Extras &extras = map->extras(sid);
        StoreIOBuffer sliceBuf(slice.size, offset,
                               static_cast<char*>(PagePointer(extras.page)));
        if (!copyFromShmSlice(e, sliceBuf, slice.next < 0))
            return false;
        debugs(20, 9, "entry " << index << " slice " << sid << " filled " <<
               extras.page);
        offset += slice.size;
        sid = slice.next;
    }

    e.mem_obj->object_sz = e.mem_obj->endOffset(); // from StoreEntry::complete()
    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);
    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    e.hideMemObject();

    return true;
}

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}
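
// Header-spanning example (hypothetical sizes): with 32 KB shared pages, a
// 40 KB reply header arrives in two slices; the first httpMsgParseStep()
// call returns 0 (need more data, so eof must be false), and the second
// call finishes the parse and returns a positive result.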

bool
MemStore::keepInLocalMemory(const StoreEntry &e) const
{
    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    return true;
}
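
// Sizing example (made-up numbers): an entry with expectedReplySize() of
// 100 KB is rejected by a 64 KB maxObjectSize() even when only 10 KB has
// been loaded so far, because ramSize = max(10 KB, 100 KB).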

void
MemStore::considerKeeping(StoreEntry &e)
{
    if (!keepInLocalMemory(e))
        return;

    // since we copy everything at once, we can only keep complete entries
    if (e.store_status != STORE_OK) {
        debugs(20, 7, HERE << "Incomplete: " << e);
        return;
    }

    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already mem-cached: " << e);
        return;
    }

    assert(e.mem_obj);

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize();

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, HERE << "Unknown expected size: " << e);
        return;
    }

    // since we copy everything at once, we can only keep fully loaded entries
    if (loadedSize != expectedSize) {
        debugs(20, 7, HERE << "partially loaded: " << loadedSize << " != " <<
               expectedSize);
        return;
    }

    keep(e); // may still fail
}

/// locks map anchor and calls copyToShm to store the entry in shared memory
void
MemStore::keep(StoreEntry &e)
{
    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return;
    }

    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return;
    }

    try {
        if (copyToShm(e, index, *slot)) {
            slot->set(e);
            map->closeForWriting(index, false);
            return;
        }
        // fall through to the error handling code
    }
    catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << index <<
               ' ' << e << ": " << x.what());
        // fall through to the error handling code
    }

    map->abortIo(index);
}

/// copies all local data to shared memory
bool
MemStore::copyToShm(StoreEntry &e, const sfileno index, Ipc::StoreMapAnchor &anchor)
{
    const int64_t eSize = e.mem_obj->endOffset();
    int64_t offset = 0;
    lastWritingSlice = -1;
    while (offset < eSize) {
        if (!copyToShmSlice(e, index, anchor, offset))
            return false;
    }

    // check that we kept everything or purge incomplete/sparse cached entry
    if (eSize != offset) {
        debugs(20, 2, "Failed to mem-cache " << e << ": " <<
               eSize << " != " << offset);
        return false;
    }

    debugs(20, 7, "mem-cached all " << eSize << " bytes of " << e);
    e.swap_file_sz = eSize;

    return true;
}

/// copies one slice worth of local memory to shared memory
bool
MemStore::copyToShmSlice(StoreEntry &e, const sfileno index, Ipc::StoreMapAnchor &anchor, int64_t &offset)
{
    Ipc::Mem::PageId page;
    Ipc::StoreMapSliceId sid = reserveSapForWriting(page); // throws
    assert(sid >= 0 && page);
    map->extras(sid).page = page; // remember the page location for cleanup
    debugs(20, 7, "entry " << index << " slice " << sid << " has " << page);

    // link this slice with other entry slices to form a store entry chain
    if (!offset) {
        assert(lastWritingSlice < 0);
        anchor.start = sid;
        debugs(20, 7, "entry " << index << " starts at slice " << sid);
    } else {
        assert(lastWritingSlice >= 0);
        map->writeableSlice(index, lastWritingSlice).next = sid;
        debugs(20, 7, "entry " << index << " slice " << lastWritingSlice <<
               " followed by slice " << sid);
    }
    lastWritingSlice = sid;

    const int64_t bufSize = Ipc::Mem::PageSize();
    StoreIOBuffer sharedSpace(bufSize, offset,
                              static_cast<char*>(PagePointer(page)));

    // copy at most one page worth of local data into the shared page
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << e << " using " <<
               bufSize << " bytes from " << offset << " in " << page);
        return false;
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << offset << " to " << page);

    Ipc::StoreMapSlice &slice = map->writeableSlice(index, sid);
    slice.next = -1;
    slice.size = copied;

    offset += copied;
    return true;
}
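
// Slice chain illustration (hypothetical 3-slice entry): copyToShmSlice()
// links anchor.start -> sid0 -> sid1 -> sid2 -> -1, with each slice's data
// page recorded in map->extras(sid).page and its byte count in slice.size.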

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slot);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}
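
// Reservation strategy recap: try a free slot plus a free page first; if
// either is missing, purge one victim entry so that noteFreeMapSlice() can
// hand its slot and page directly to us; otherwise give up and throw.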

void
MemStore::noteFreeMapSlice(const sfileno sliceId)
{
    Ipc::Mem::PageId &pageId = map->extras(sliceId).page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}
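
// Hand-off note: when a purge triggered by reserveSapForWriting() frees a
// slice, the freed slot and page bypass the free pools and go straight to
// the waiting caller; zeroing pageId in the extras keeps later cleanup from
// returning the same page twice.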

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}
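
// Worked example (hypothetical sizes): with "cache_mem 256 MB" and a 32 KB
// shared page size, EntryLimit() returns 256 * 1024 / 32 = 8192; the real
// divisor is whatever Ipc::Mem::PageSize() reports on this build.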

/// reports our needs for shared memory pages to Ipc::Mem::Pages
class MemStoreClaimMemoryNeedsRr: public RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &r);
};

RunnerRegistrationEntry(rrClaimMemoryNeeds, MemStoreClaimMemoryNeedsRr);

void
MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry &)
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

/// decides whether to use a shared memory cache or checks its configuration
class MemStoreCfgRr: public ::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &);
};

RunnerRegistrationEntry(rrFinalizeConfig, MemStoreCfgRr);

void MemStoreCfgRr::run(const RunnerRegistry &r)
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}
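
// Configuration sketch (hypothetical squid.conf): with "workers 4",
// "cache_mem 256 MB", and memory_cache_shared left unset, the defaulting
// logic above enables the shared cache when atomic operations and shared
// memory segments are both available.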

/// initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~MemStoreRr();

protected:
    virtual void create(const RunnerRegistry &);

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
};

RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);

void MemStoreRr::run(const RunnerRegistry &r)
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r);
}

void MemStoreRr::create(const RunnerRegistry &)
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                                              entryLimit,
                                              sizeof(Ipc::Mem::PageId));
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete mapOwner;
    delete spaceOwner;
}