/*
 * Copyright (C) 1996-2021 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Memory Cache */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()
/// Packs to shared memory, allocating new slots/pages as needed.
/// Requires an Ipc::StoreMapAnchor locked for writing.
class ShmWriter: public Packable
{
public:
    ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice = -1);

    /* Packable API */
    virtual void append(const char *aBuf, int aSize) override;
    virtual void vappendf(const char *fmt, va_list ap) override;

public:
    StoreEntry *entry; ///< the entry being updated

    /// the slot keeping the first byte of the appended content (at least)
    /// either set via constructor parameter or allocated by the first append
    Ipc::StoreMapSliceId firstSlice;

    /// the slot keeping the last byte of the appended content (at least)
    Ipc::StoreMapSliceId lastSlice;

    uint64_t totalWritten; ///< cumulative number of bytes appended so far

protected:
    void copyToShm();
    void copyToShmSlice(Ipc::StoreMap::Slice &slice);

private:
    MemStore &store;
    const sfileno fileNo;

    /* set by (and only valid during) append calls */
    const char *buf; ///< content being appended now
    int bufSize; ///< buf size
    int bufWritten; ///< buf bytes appended so far
};
ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
    entry(anEntry),
    firstSlice(aFirstSlice),
    lastSlice(firstSlice),
    totalWritten(0),
    store(aStore),
    fileNo(aFileNo),
    buf(nullptr),
    bufSize(0),
    bufWritten(0)
{
    Must(entry);
}
void
ShmWriter::append(const char *aBuf, int aBufSize)
{
    Must(!buf);
    buf = aBuf;
    bufSize = aBufSize;
    if (bufSize) {
        Must(aBuf);
        bufWritten = 0;
        copyToShm();
    }
    buf = nullptr;
    bufSize = 0;
    bufWritten = 0;
}

void
ShmWriter::vappendf(const char *fmt, va_list ap)
{
    SBuf vaBuf;
    va_list apCopy;
    va_copy(apCopy, ap);
    vaBuf.vappendf(fmt, apCopy);
    va_end(apCopy);
    append(vaBuf.rawContent(), vaBuf.length());
}
/// copies the entire buffer to shared memory
void
ShmWriter::copyToShm()
{
    Must(bufSize > 0); // do not use up shared memory pages for nothing
    Must(firstSlice < 0 || lastSlice >= 0);

    // fill, skip slices that are already full
    while (bufWritten < bufSize) {
        Ipc::StoreMap::Slice &slice = store.nextAppendableSlice(fileNo, lastSlice);
        if (firstSlice < 0)
            firstSlice = lastSlice;
        copyToShmSlice(slice);
    }

    debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
}
/// copies at most one slice worth of buffer to shared memory
void
ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
    debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
           page);

    Must(bufWritten <= bufSize);
    const int64_t writingDebt = bufSize - bufWritten;
    const int64_t pageSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = totalWritten % pageSize;
    const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
    memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
           copySize);

    debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
           entry << " from " << sliceOffset << " in " << page);

    slice.size += copySize;
    bufWritten += copySize;
    totalWritten += copySize;
    // fresh anchor.basics.swap_file_sz is already set [to the stale value]

    // either we wrote everything or we filled the entire slice
    Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
}
MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}

void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no shared memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}
uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}
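
// Illustrative configuration math (the directive names are real squid.conf
// options; the values are made up): with "maximum_object_size_in_memory
// 512 KB" and "cache_mem 256 MB", maxObjectSize() yields 512 KB; with
// "cache_mem 100 KB" it would yield 100 KB, since an object larger than
// the entire cache can never fit.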
void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().find() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->createMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied)
        return e;

    debugs(20, 3, "failed for " << *e);
    map->freeEntry(index); // do not let others into the same trap
    destroyStoreEntry(static_cast<hash_link *>(e));
    return NULL;
}
void
MemStore::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    assert(updatedE);
    assert(updatedE->mem_obj);
    if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
        return;

    try {
        updateHeadersOrThrow(update);
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update);
    }
}
void
MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
{
    // our +/- hdr_sz math below does not work if the chains differ [in size]
    Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);

    const uint64_t staleHdrSz = update.entry->mem().baseReply().hdr_sz;
    debugs(20, 7, "stale hdr_sz: " << staleHdrSz);

    /* we will need to copy same-slice payload after the stored headers later */
    Must(staleHdrSz > 0);
    update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
    Must(update.stale.splicingPoint >= 0);
    Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);

    Must(update.stale.anchor);
    ShmWriter writer(*this, update.entry, update.fresh.fileNo);
    update.entry->mem().freshestReply().packHeadersUsingSlowPacker(writer);
    const uint64_t freshHdrSz = writer.totalWritten;
    debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));

    /* copy same-slice payload remaining after the stored headers */
    const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
    const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
    const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
    Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
    Must(slice.size >= headersInLastSlice);
    const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
    const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
    char *page = static_cast<char*>(PagePointer(extra.page));
    debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
    writer.append(page + headersInLastSlice, payloadInLastSlice);
    update.fresh.splicingPoint = writer.lastSlice;

    update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
    update.fresh.anchor->basics.swap_file_sz += freshHdrSz;

    map->closeForUpdating(update);
}
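
// Worked example of the splicing math above (sizes are illustrative; the
// real sliceCapacity is Ipc::Mem::PageSize()): with 32 KB slices and
// staleHdrSz == 70 KB, the stale headers end 6 KB into their third slice
// (headersInLastSlice == 70 KB % 32 KB == 6 KB). If that slice holds
// slice.size == 20 KB, then payloadInLastSlice == 14 KB of body bytes
// share the slice with the headers and must be re-appended after the
// fresh headers to keep the fresh chain contiguous.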
bool
MemStore::anchorToCache(StoreEntry &entry, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(entry.key), index);
    if (!slot)
        return false;

    anchorEntry(entry, index, *slot);
    inSync = updateAnchoredWith(entry, index, *slot);
    return true; // even if inSync is false
}
bool
MemStore::updateAnchored(StoreEntry &entry)
{
    if (!map)
        return false;

    assert(entry.mem_obj);
    assert(entry.hasMemStore());
    const sfileno index = entry.mem_obj->memCache.index;
    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateAnchoredWith(entry, index, anchor);
}
/// updates Transients entry after its anchor has been located
bool
MemStore::updateAnchoredWith(StoreEntry &entry, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    entry.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(entry, index, anchor);
    return copied;
}
/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    assert(!e.hasDisk()); // no conflict with disk entry basics
    anchor.exportInto(e);

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }

    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}
/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 8, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 8, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}
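
// Note on the snapshot discipline in copyFromShm(): a concurrent writer
// may still be appending, growing slice.size and eventually setting
// slice.next. Copying from the wasSize/wasEof snapshots and then
// re-checking slice.size before advancing ensures we either re-visit a
// slice that grew while we copied or move on, but never skip fresh bytes.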
/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    const auto rep = &e.mem().adjustableBaseReply();
    if (rep->pstate < Http::Message::psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();
        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == Http::Message::psParsed);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}
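
// The MemBuf copy above is needed only while headers are being parsed:
// httpMsgParseStep() expects a 0-terminated buffer, and shared memory
// slices are not 0-terminated. Once pstate reaches psParsed, slice bytes
// go straight into data_hdr without an intermediate copy.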
/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (!e.mem_obj->vary_headers.isEmpty()) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);
    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}
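
// In squid.conf terms (directive names are real; sizes illustrative), the
// size check above rejects an entry when max(loadedSize, expectedSize)
// exceeds min(maximum_object_size_in_memory, cache_mem): for example, a
// 600 KB reply with "maximum_object_size_in_memory 512 KB" is not cached
// in shared memory even though cache_mem is much larger.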
/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    // Do not allow others to feed off an unknown-size entry because we will
    // stop swapping it out if it grows too large.
    if (e.mem_obj->expectedReplySize() >= 0)
        map->startAppending(index);
    e.memOutDecision(true);
    return true;
}
/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    assert(map);
    assert(e.mem_obj);
    Must(!EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT));

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    // throw if an accepted unknown-size entry grew too big or max-size changed
    Must(eSize <= maxObjectSize());

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
    lastWritingSlice = anchor.start;

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice = nextAppendableSlice(
                                          e.mem_obj->memCache.index, lastWritingSlice);
        if (anchor.start < 0)
            anchor.start = lastWritingSlice;
        copyToShmSlice(e, anchor, slice);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}
/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}
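
// Like ShmWriter::copyToShmSlice() above, this code relies on entry bytes
// being stored contiguously: memCache.offset % PageSize() is where the
// next byte lands inside the current page, so the sharedSpace window of
// (bufSize - sliceOffset) bytes never crosses a page boundary.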
/// starts checking with the entry chain slice at a given offset and
/// returns a not-full (but not necessarily empty) slice, updating sliceOffset
Ipc::StoreMap::Slice &
MemStore::nextAppendableSlice(const sfileno fileNo, sfileno &sliceOffset)
{
    // allocate the very first slot for the entry if needed
    if (sliceOffset < 0) {
        Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
        Must(anchor.start < 0);
        Ipc::Mem::PageId page;
        sliceOffset = reserveSapForWriting(page); // throws
        extras->items[sliceOffset].page = page;
        anchor.start = sliceOffset;
    }

    const size_t sliceCapacity = Ipc::Mem::PageSize();
    do {
        Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                sliceOffset = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = sliceOffset = reserveSapForWriting(page);
            extras->items[sliceOffset].page = page;
            debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
            continue; // to get and return the slice at the new sliceOffset
        }

        return slice;
    } while (true);
    /* not reached */
}
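
// Chain-walking sketch: sliceOffset names the slice to try first. A full
// slice (slice.size >= sliceCapacity) that already has a successor just
// advances the cursor; a full slice at the end of the chain triggers
// reserveSapForWriting(), which may throw, leaving the chain intact but
// unextended. The returned slice may be partially filled; callers derive
// the in-page write offset from their own running byte counts.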
/// safely returns a previously allocated memory page for the given entry slice
Ipc::Mem::PageId
MemStore::pageForSlice(Ipc::StoreMapSliceId sliceId)
{
    Must(extras);
    Must(sliceId >= 0);
    Ipc::Mem::PageId page = extras->items[sliceId].page;
    Must(page);
    return page;
}
/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        const auto slotId = slot.number - 1;
        debugs(20, 5, "got a previously free slot: " << slotId);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            map->prepFreeSlice(slotId);
            return slotId;
        }

        debugs(20, 3, "but there is no free page, returning " << slotId);
        freeSlots->push(slot);
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        const auto slotId = slot.number - 1;
        map->prepFreeSlice(slotId);
        debugs(20, 5, "got previously busy " << slotId << " and " << page);
        return slotId;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}
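
// The "slot.number - 1" conversions above bridge two numbering schemes:
// PageId numbers are 1-based (zero means "unset") while StoreMap slice
// IDs are 0-based. noteFreeMapSlice() below applies the inverse mapping
// (slotId.number = sliceId + 1) when recycling slices.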
void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = Ipc::Mem::PageStack::IdForMemStoreSpace();
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}
void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}
void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}
void
MemStore::evictCached(StoreEntry &e)
{
    debugs(47, 5, e);
    if (e.hasMemStore()) {
        if (map->freeEntry(e.mem_obj->memCache.index))
            CollapsedForwarding::Broadcast(e);
        if (!e.locked()) {
            disconnect(e);
            e.destroyMemObject();
        }
    } else if (const auto key = e.publicKey()) {
        // the entry may have been loaded and then disconnected from the cache
        evictIfFound(key);
        if (!e.locked())
            e.destroyMemObject();
    }
}
void
MemStore::evictIfFound(const cache_key *key)
{
    if (map)
        map->freeEntryByKey(key);
}
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (e.hasMemStore()) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().stopSharing(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}
bool
MemStore::Requested()
{
    return Config.memShared && Config.memMaxSize > 0;
}

/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Requested())
        return 0;

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}
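
// Illustrative sizing (the real page size comes from Ipc::Mem::PageSize()
// and may differ by build): with 32 KB pages, "cache_mem 256 MB" yields
// an entryLimit of 256 MB / 32 KB = 8192 map entries and slice slots.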
/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

RunnerRegistrationEntry(MemStoreRr);
void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}
void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }

    if (MemStore::Requested() && Config.memMaxSize < Ipc::Mem::PageSize()) {
        debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small (" <<
               (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
               (Ipc::Mem::PageSize() / 1024.0) << " KB");
    }
}
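
// Example squid.conf lines relevant to the checks above (the directives
// are real; the values are illustrative):
//   memory_cache_shared on   # needs shared memory support and SMP mode
//   cache_mem 256 MB         # should be at least one shared memory page
//   workers 4                # UsingSmp() is true only with multiple workers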
void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
void
MemStoreRr::create()
{
    if (!MemStore::Enabled())
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    assert(entryLimit > 0);

    Ipc::Mem::PageStack::Config spaceConfig;
    spaceConfig.poolId = Ipc::Mem::PageStack::IdForMemStoreSpace();
    spaceConfig.pageSize = 0; // the pages are stored in Ipc::Mem::Pages
    spaceConfig.capacity = entryLimit;
    spaceConfig.createFull = true; // all pages are initially available
    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, spaceConfig);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}