/*
 * Copyright (C) 1996-2015 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Memory Cache */
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;
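
// Note: PageId.number == 0 means "unset", so free slices are stored with a
// +1 shift: noteFreeMapSlice() below records a freed slice `sid` as a Page
// numbered sid + 1, and reserveSapForWriting() converts back with
// `slot.number - 1`.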

MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}

void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->makeMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    destroyStoreEntry(static_cast<hash_link *>(e));
    return NULL;
}

void
MemStore::get(String const, STOREGETCLIENT, void *)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

bool
MemStore::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    inSync = updateCollapsedWith(collapsed, index, *slot);
    return true; // even if inSync is false
}

bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    const sfileno index = collapsed.mem_obj->memCache.index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    if (!map)
        return false;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}

/// updates collapsed entry after its anchor has been located
bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(collapsed, index, anchor);
    return copied;
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastmod = basics.lastmod;
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_CLR(e.flags, KEY_PRIVATE);
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}
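
// Worked example of the slice walk above (hypothetical numbers, assuming a
// 32 KB shared page size): a complete 70 KB response occupies three slices
// of 32K, 32K, and 6K bytes; sliceOffset advances 0 -> 32K -> 64K, and
// wasEof becomes true while copying the last slice because anchor.complete()
// holds and that slice has no successor (slice.next < 0).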

/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false; // cannot parse; assume dead entry
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (e.mem_obj->vary_headers) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, "Unknown expected size: " << e);
        return false;
    }

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}

/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    map->startAppending(index);
    e.memOutDecision(true);
    return true;
}
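
// Sketch of the writer protocol as suggested by this file (not an
// authoritative API description): openForWriting() locks the anchor for the
// writer, startAppending() lets concurrent readers begin following the slice
// chain while we are still appending, and completeWriting() below eventually
// releases the lock via closeForWriting().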

/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    if (anchor.start < 0) { // must allocate the very first slot for e
        Ipc::Mem::PageId page;
        anchor.start = reserveSapForWriting(page); // throws
        extras->items[anchor.start].page = page;
    }

    lastWritingSlice = anchor.start;
    const size_t sliceCapacity = Ipc::Mem::PageSize();

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice =
            map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                lastWritingSlice = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = lastWritingSlice = reserveSapForWriting(page);
            extras->items[lastWritingSlice].page = page;
            debugs(20, 7, "entry " << index << " new slice: " << lastWritingSlice);
        }

        copyToShmSlice(e, anchor);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}

/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor)
{
    Ipc::StoreMap::Slice &slice =
        map->writeableSlice(e.mem_obj->memCache.index, lastWritingSlice);

    Ipc::Mem::PageId page = extras->items[lastWritingSlice].page;
    assert(lastWritingSlice >= 0 && page);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}
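
// Note: the modulo math above relies on copyToShm() filling each slice to a
// full page before reserving the next one, so memCache.offset % PageSize()
// is exactly the write position within the current page. For example
// (hypothetical numbers, assuming 32 KB pages): at memCache.offset 40960,
// sliceOffset is 8192 and at most 24576 bytes fit into this page.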

/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        }

        debugs(20, 3, "but there is no free page, returning " << slot);
        freeSlots->push(slot);
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);

    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}
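
// The slot-recovery handshake implied by the code above: when the free-slot
// stack is empty, reserveSapForWriting() points waitingFor at its local slot
// and page and calls map->purgeOne(); purging frees a slice, which
// synchronously triggers noteFreeMapSlice(), and the waitingFor branch hands
// the freed slot and its page directly to the waiting caller instead of
// pushing them back onto the shared free stack.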

void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}

void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index, false);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}

void
MemStore::markForUnlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->memCache.index >= 0)
        map->freeEntry(e.mem_obj->memCache.index);
}

void
MemStore::unlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->memCache.index >= 0) {
        map->freeEntry(e.mem_obj->memCache.index);
        disconnect(e);
    } else if (map) {
        // the entry may have been loaded and then disconnected from the cache
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
    }

    e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right?
}

void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (mem_obj.memCache.index >= 0) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}
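
// Worked example (hypothetical configuration, assuming a 32 KB shared page
// size): with cache_mem set to 256 MB, EntryLimit() yields
// 268435456 / 32768 = 8192 map entries; MemStoreRr::create() below warns
// instead when cache_mem is smaller than a single page.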

/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

RunnerRegistrationEntry(MemStoreRr);

void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}

void
MemStoreRr::create()
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                 entryLimit, 0);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}