/*
 * DEBUG: section 20    Memory Cache
 *
 */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"

/// shared memory segment path to use for MemStore maps
static const char *MapLabel = "cache_mem_map";
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

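/// attaches to the shared entry map and free-slice index (created by
/// MemStoreRr) and cross-checks mem-cache and disk-cache object size limits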
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

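/// reports shared memory cache capacity, current size, and entry count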
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

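/// appends a human-readable shared memory cache report to the given entry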
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));

            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(limit)) {
                const int usedSlots = limit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / limit));
            }

            if (limit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

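/// looks up the key in the shared map and, if found, copies the cached
/// response into a brand new local StoreEntry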
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();
    e->lock_count = 0;

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    // At least one caller calls createMemObject() if there is not one, so
    // we hide the true object until that happens (to avoid leaking TBD URLs).
    e->createMemObject("TBD", "TBD");

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    // we copied everything we could to local memory; no more need to lock
    map->closeForReading(index);
    e->mem_obj->mem_index = -1;

    e->hideMemObject();

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

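/// attaches a collapsed entry to its shared map slot and imports whatever
/// cached data is already available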
bool
MemStore::anchorCollapsed(StoreEntry &collapsed)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
        reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    return updateCollapsedWith(collapsed, index, *slot);
}

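/// copies any newly cached data into a collapsed entry that is already
/// attached to the shared map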
243
244 bool
245 MemStore::updateCollapsed(StoreEntry &collapsed)
246 {
247 if (!map)
248 return false;
249
250 if (collapsed.mem_status != IN_MEMORY) // no longer using a memory cache
251 return false;
252
253 const sfileno index = collapsed.mem_obj->mem_index;
254
255 // already disconnected from the cache, no need to update
256 if (index < 0)
257 return true;
258
259 const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
260 return updateCollapsedWith(collapsed, index, anchor);
261 }
262
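/// imports the anchor basics and any new body bytes into the collapsed entry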
bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz; // XXX: make atomic

    const bool copied = copyFromShm(collapsed, index, anchor);

    return copied; // XXX: when do we unlock the map slot?
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastmod = basics.lastmod;
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    assert(e.mem_obj);
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);
    e.mem_obj->mem_index = index;
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_SET(e.flags, ENTRY_CACHABLE);
    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_CLR(e.flags, KEY_PRIVATE);
    EBIT_SET(e.flags, ENTRY_VALIDATED);
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMap::Extras &extras = map->extras(sid);
            char *page = static_cast<char*>(PagePointer(extras.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extras.page << " +" << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    e.mem_obj->object_sz = e.mem_obj->endOffset(); // from StoreEntry::complete()
    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);
    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key
    // XXX: unlock anchor here!
    return true;
}

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

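/// whether the entry is memory-cachable and fits the mem-cache size limits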
bool
MemStore::keepInLocalMemory(const StoreEntry &e) const
{
    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    return true;
}

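/// mem-caches the entry if it is complete, fully loaded, and eligible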
void
MemStore::considerKeeping(StoreEntry &e)
{
    if (!keepInLocalMemory(e))
        return;

    // since we copy everything at once, we can only keep complete entries
    if (e.store_status != STORE_OK) {
        debugs(20, 7, HERE << "Incomplete: " << e);
        return;
    }

    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already mem-cached: " << e);
        return;
    }

    assert(e.mem_obj);

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize();

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, HERE << "Unknown expected size: " << e);
        return;
    }

    // since we copy everything at once, we can only keep fully loaded entries
    if (loadedSize != expectedSize) {
        debugs(20, 7, HERE << "partially loaded: " << loadedSize << " != " <<
               expectedSize);
        return;
    }

    keep(e); // may still fail
}

/// locks map anchor and calls copyToShm to store the entry in shared memory
void
MemStore::keep(StoreEntry &e)
{
    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return;
    }

    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return;
    }

    try {
        if (copyToShm(e, index, *slot)) {
            slot->set(e);
            map->closeForWriting(index, false);
            CollapsedForwarding::Broadcast(static_cast<const cache_key*>(e.key));
            return;
        }
        // fall through to the error handling code
    }
    catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << index <<
               ' ' << e << ": " << x.what());
        // fall through to the error handling code
    }

    map->abortIo(index);
    CollapsedForwarding::Broadcast(static_cast<cache_key*>(e.key));
}

/// copies all local data to shared memory
bool
MemStore::copyToShm(StoreEntry &e, const sfileno index, Ipc::StoreMapAnchor &anchor)
{
    const int64_t eSize = e.mem_obj->endOffset();
    int64_t offset = 0;
    lastWritingSlice = -1;
    while (offset < eSize) {
        if (!copyToShmSlice(e, index, anchor, offset))
            return false;
    }

    // check that we kept everything or purge incomplete/sparse cached entry
    if (eSize != offset) {
        debugs(20, 2, "Failed to mem-cache " << e << ": " <<
               eSize << " != " << offset);
        return false;
    }

    debugs(20, 7, "mem-cached all " << eSize << " bytes of " << e);
    e.swap_file_sz = eSize;

    return true;
}

/// copies one slice worth of local memory to shared memory
bool
MemStore::copyToShmSlice(StoreEntry &e, const sfileno index, Ipc::StoreMapAnchor &anchor, int64_t &offset)
{
    Ipc::Mem::PageId page;
    Ipc::StoreMapSliceId sid = reserveSapForWriting(page); // throws
    assert(sid >= 0 && page);
    map->extras(sid).page = page; // remember the page location for cleanup
    debugs(20, 7, "entry " << index << " slice " << sid << " has " << page);

    // link this slice with other entry slices to form a store entry chain
    if (!offset) {
        assert(lastWritingSlice < 0);
        anchor.start = sid;
        debugs(20, 7, "entry " << index << " starts at slice " << sid);
    } else {
        assert(lastWritingSlice >= 0);
        map->writeableSlice(index, lastWritingSlice).next = sid;
        debugs(20, 7, "entry " << index << " slice " << lastWritingSlice <<
               " followed by slice " << sid);
    }
    lastWritingSlice = sid;

    const int64_t bufSize = Ipc::Mem::PageSize();
    StoreIOBuffer sharedSpace(bufSize, offset,
                              static_cast<char*>(PagePointer(page)));

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << e << " using " <<
               bufSize << " bytes from " << offset << " in " << page);
        return false;
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << offset << " to " << page);

    Ipc::StoreMapSlice &slice = map->writeableSlice(index, sid);
    slice.next = -1;
    slice.size = copied;

    offset += copied;
    return true;
}

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        } else {
            debugs(20, 3, "but there is no free page, returning " << slot);
            freeSlots->push(slot);
        }
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

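/// handles a freed map slice: gives its page to a reserveSapForWriting()
/// caller waiting for it or returns the slice and page to the free pools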
void
MemStore::noteFreeMapSlice(const sfileno sliceId)
{
    Ipc::Mem::PageId &pageId = map->extras(sliceId).page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

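/// removes the entry from the shared map (by slice index or key) and
/// destroys its local MemObject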
void
MemStore::unlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->mem_index >= 0) {
        map->freeEntry(e.mem_obj->mem_index);
        disconnect(e);
    } else {
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
    }
    e.destroyMemObject();
}

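/// aborts any shared map I/O for the entry and forgets its slice index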
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->mem_index >= 0) {
        map->abortIo(e.mem_obj->mem_index);
        e.mem_obj->mem_index = -1;
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}

/// reports our needs for shared memory pages to Ipc::Mem::Pages
class MemStoreClaimMemoryNeedsRr: public RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &r);
};

RunnerRegistrationEntry(rrClaimMemoryNeeds, MemStoreClaimMemoryNeedsRr);

void
MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry &)
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

/// decides whether to use a shared memory cache or checks its configuration
class MemStoreCfgRr: public ::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &);
};

RunnerRegistrationEntry(rrFinalizeConfig, MemStoreCfgRr);

void MemStoreCfgRr::run(const RunnerRegistry &r)
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

/// initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~MemStoreRr();

protected:
    virtual void create(const RunnerRegistry &);

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
};

RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);

void MemStoreRr::run(const RunnerRegistry &r)
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r);
}

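/// creates the shared free-slot stack and entry map owners
/// (or warns if the configured mem-cache is smaller than one page)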
void MemStoreRr::create(const RunnerRegistry &)
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                                              entryLimit,
                                              sizeof(Ipc::Mem::PageId));
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete mapOwner;
    delete spaceOwner;
}