/* DEBUG: section 20    Memory Cache */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"

/// shared memory segment path to use for MemStore maps
static const char *MapLabel = "cache_mem_map";
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
// TODO: sync with Rock::SwapDir::*Path()

// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;

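// Note: these "space" Pages are bookkeeping records only; their pool and
// number fields index free map slices, and they never point at actual
// cache_mem page memory (reserveSapForWriting() below obtains real pages
// separately, via Ipc::Mem::GetPage()).
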
MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this; // the noteFreeMapSlice() callback target
}

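// Ordering note: init() only attaches to already-created segments: shm_old()
// finds the SpaceLabel and MapLabel segments that MemStoreRr::create() near
// the end of this file builds with shm_new() and MemStoreMap::Init()
// beforehand.
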
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));

            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(limit)) {
                const int usedSlots = limit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / limit));
            }

            if (limit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}

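// For example (hypothetical values): with maximum_object_size_in_memory at
// 512 KB and cache_mem at 256 MB, maxObjectSize() returns 512 KB. Taking the
// min() with cache_mem also keeps a single object from being allowed to
// exceed the entire shared cache.
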
void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    // At least one caller calls createMemObject() if there is not one, so
    // we hide the true object until that happens (to avoid leaking TBD URLs).
    e->createMemObject("TBD", "TBD");

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    // we copied everything we could to local memory; no more need to lock
    map->closeForReading(index);
    e->mem_obj->mem_index = -1;

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    e->destroyMemObject();
    delete e;
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

bool
MemStore::anchorCollapsed(StoreEntry &collapsed)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    return updateCollapsedWith(collapsed, index, *slot);
}

bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    if (collapsed.mem_status != IN_MEMORY) // no longer using a memory cache
        return true;

    const sfileno index = collapsed.mem_obj->mem_index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}

bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz; // XXX: make atomic
    const bool copied = copyFromShm(collapsed, index, anchor);
    return copied; // XXX: when do we unlock the map slot?
}

/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastmod = basics.lastmod;
    e.refcount = basics.refcount;
    e.flags = basics.flags;

    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);
    e.mem_obj->mem_index = index;
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_SET(e.flags, ENTRY_CACHABLE);
    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_CLR(e.flags, KEY_PRIVATE);
    EBIT_SET(e.flags, ENTRY_VALIDATED);
}

/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMap::Extras &extras = map->extras(sid);
            char *page = static_cast<char*>(PagePointer(extras.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extras.page << " +" << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    e.mem_obj->object_sz = e.mem_obj->endOffset(); // from StoreEntry::complete()
    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);
    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // XXX: unlock anchor here!
    return true;
}

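// Reading note: the snapshot logic above tolerates a concurrent writer. For
// instance, if slice.size grows after we copied wasSize bytes, then
// wasSize < slice.size holds and we neither advance sliceOffset nor break, so
// the next loop iteration re-reads the same slice and copies only the bytes
// past the already-copied prefix.
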
/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == psParsed);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}

bool
MemStore::keepInLocalMemory(const StoreEntry &e) const
{
    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    return true;
}

void
MemStore::considerKeeping(StoreEntry &e)
{
    if (!keepInLocalMemory(e))
        return;

    // since we copy everything at once, we can only keep complete entries
    if (e.store_status != STORE_OK) {
        debugs(20, 7, HERE << "Incomplete: " << e);
        return;
    }

    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already mem-cached: " << e);
        return;
    }

    assert(e.mem_obj);

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize();

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, HERE << "Unknown expected size: " << e);
        return;
    }

    // since we copy everything at once, we can only keep fully loaded entries
    if (loadedSize != expectedSize) {
        debugs(20, 7, HERE << "partially loaded: " << loadedSize << " != " <<
               expectedSize);
        return;
    }

    keep(e); // may still fail
}

/// locks map anchor and calls copyToShm to store the entry in shared memory
void
MemStore::keep(StoreEntry &e)
{
    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return;
    }

    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return;
    }

    try {
        if (copyToShm(e, index, *slot)) {
            slot->set(e);
            map->closeForWriting(index, false);
            CollapsedForwarding::Broadcast(static_cast<const cache_key*>(e.key));
            return;
        }
        // fall through to the error handling code
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << index <<
               ' ' << e << ": " << x.what());
        // fall through to the error handling code
    }

    map->abortWriting(index);
    CollapsedForwarding::Broadcast(static_cast<cache_key*>(e.key));
}

/// copies all local data to shared memory
bool
MemStore::copyToShm(StoreEntry &e, const sfileno index, Ipc::StoreMapAnchor &anchor)
{
    const int64_t eSize = e.mem_obj->endOffset();
    int64_t offset = 0;
    lastWritingSlice = -1;
    while (offset < eSize) {
        if (!copyToShmSlice(e, index, anchor, offset))
            return false;
    }

    // check that we kept everything or purge incomplete/sparse cached entry
    if (eSize != offset) {
        debugs(20, 2, "Failed to mem-cache " << e << ": " <<
               eSize << " != " << offset);
        return false;
    }

    debugs(20, 7, "mem-cached all " << eSize << " bytes of " << e);
    e.swap_file_sz = eSize;

    return true;
}

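// Worked example (assuming 4 KB shared memory pages): a 9 KB response takes
// three copyToShmSlice() calls, with offset advancing by 4096, 4096, and
// finally 1024 bytes, so the entry chain ends up three slices long.
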
/// copies one slice worth of local memory to shared memory
bool
MemStore::copyToShmSlice(StoreEntry &e, const sfileno index, Ipc::StoreMapAnchor &anchor, int64_t &offset)
{
    Ipc::Mem::PageId page;
    Ipc::StoreMapSliceId sid = reserveSapForWriting(page); // throws
    assert(sid >= 0 && page);
    map->extras(sid).page = page; // remember the page location for cleanup
    debugs(20, 7, "entry " << index << " slice " << sid << " has " << page);

    // link this slice with other entry slices to form a store entry chain
    if (!offset) {
        assert(lastWritingSlice < 0);
        anchor.start = sid;
        debugs(20, 7, "entry " << index << " starts at slice " << sid);
    } else {
        assert(lastWritingSlice >= 0);
        map->writeableSlice(index, lastWritingSlice).next = sid;
        debugs(20, 7, "entry " << index << " slice " << lastWritingSlice <<
               " followed by slice " << sid);
    }
    lastWritingSlice = sid;

    const int64_t bufSize = Ipc::Mem::PageSize();
    StoreIOBuffer sharedSpace(bufSize, offset,
                              static_cast<char*>(PagePointer(page)));

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << e << " using " <<
               bufSize << " bytes from " << offset << " in " << page);
        return false;
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << offset << " to " << page);

    Ipc::StoreMapSlice &slice = map->writeableSlice(index, sid);
    slice.size = copied;
    offset += copied;
    return true;
}

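// Chain layout sketch: anchor.start names the first slice, and each slice's
// .next field names its successor, e.g. anchor.start -> sid0 -> sid1 -> -1.
// copyFromShm() above walks this same chain when restoring an entry.
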
/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        }

        debugs(20, 3, "but there is no free page, returning " << slot);
        freeSlots->push(slot);
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}

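// Eviction handshake sketch: purgeOne() frees a victim entry, which makes the
// map call noteFreeMapSlice() for the victim's slices; that callback sees
// waitingFor set and hands a freed slot and its page back through these
// pointers (remaining slices, if any, go to the shared free-slot stack).
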
void
MemStore::noteFreeMapSlice(const sfileno sliceId)
{
    Ipc::Mem::PageId &pageId = map->extras(sliceId).page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}

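// Encoding note: slot numbers are slice IDs shifted by one (sliceId + 1 here,
// slot.number - 1 in reserveSapForWriting()) because PageId treats a zero
// number as "no page" while slice IDs start at zero.
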
void
MemStore::unlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->mem_index >= 0) {
        map->freeEntry(e.mem_obj->mem_index);
        disconnect(e);
    } else {
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
    }
    e.destroyMemObject();
}

void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->mem_index >= 0) {
        map->abortIo(e.mem_obj->mem_index);
        e.mem_obj->mem_index = -1;
    }
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}

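// Worked example (hypothetical values): with cache_mem set to 256 MB and a
// 32 KB shared page size, EntryLimit() yields 256 MB / 32 KB = 8192 entries,
// one page-sized slice per mapped entry slot.
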
/// reports our needs for shared memory pages to Ipc::Mem::Pages
class MemStoreClaimMemoryNeedsRr: public RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &r);
};

RunnerRegistrationEntry(rrClaimMemoryNeeds, MemStoreClaimMemoryNeedsRr);

void
MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry &)
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

/// decides whether to use a shared memory cache or checks its configuration
class MemStoreCfgRr: public ::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &);
};

RunnerRegistrationEntry(rrFinalizeConfig, MemStoreCfgRr);

void MemStoreCfgRr::run(const RunnerRegistry &r)
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

/// initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~MemStoreRr();

protected:
    virtual void create(const RunnerRegistry &);

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
};

RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);

void MemStoreRr::run(const RunnerRegistry &r)
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r);
}

void MemStoreRr::create(const RunnerRegistry &)
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                                              entryLimit, sizeof(Ipc::Mem::PageId));
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete mapOwner;
    delete spaceOwner;
}