/*
 * Copyright (C) 1996-2023 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */

/* DEBUG: section 20    Memory Cache */
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "sbuf/SBuf.h"
#include "sbuf/Stream.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()
/// Packs to shared memory, allocating new slots/pages as needed.
/// Requires an Ipc::StoreMapAnchor locked for writing.
class ShmWriter: public Packable
{
public:
    ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice = -1);

    /* Packable API */
    void append(const char *aBuf, int aSize) override;
    void vappendf(const char *fmt, va_list ap) override;

public:
    StoreEntry *entry; ///< the entry being updated

    /// the slot keeping the first byte of the appended content (at least)
    /// either set via constructor parameter or allocated by the first append
    Ipc::StoreMapSliceId firstSlice;

    /// the slot keeping the last byte of the appended content (at least)
    Ipc::StoreMapSliceId lastSlice;

    uint64_t totalWritten; ///< cumulative number of bytes appended so far

protected:
    void copyToShm();
    void copyToShmSlice(Ipc::StoreMap::Slice &slice);

private:
    MemStore &store;
    const sfileno fileNo;

    /* set by (and only valid during) append calls */
    const char *buf; ///< content being appended now
    int bufSize; ///< buf size
    int bufWritten; ///< buf bytes appended so far
};
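
// Usage (see MemStore::updateHeadersOrThrow() below): construct a ShmWriter
// over a writer-locked anchor, let an HttpReply header packer stream into it
// through the Packable append()/vappendf() API, and then read totalWritten
// back as the packed header size.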
ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
    entry(anEntry),
    firstSlice(aFirstSlice),
    lastSlice(firstSlice),
    totalWritten(0),
    store(aStore),
    fileNo(aFileNo),
    buf(nullptr),
    bufSize(0),
    bufWritten(0)
{
    Must(entry);
}
void
ShmWriter::append(const char *aBuf, int aBufSize)
{
    Must(!buf);
    buf = aBuf;
    bufSize = aBufSize;
    if (bufSize) {
        Must(bufWritten == 0);
        copyToShm();
    }
    buf = nullptr;
    bufSize = 0;
    bufWritten = 0;
}
void
ShmWriter::vappendf(const char *fmt, va_list ap)
{
    SBuf vaBuf;
    va_list apCopy;
    va_copy(apCopy, ap);
    vaBuf.vappendf(fmt, apCopy);
    va_end(apCopy);
    append(vaBuf.rawContent(), vaBuf.length());
}
/// copies the entire buffer to shared memory
void
ShmWriter::copyToShm()
{
    Must(bufSize > 0); // do not use up shared memory pages for nothing
    Must(firstSlice < 0 || lastSlice >= 0);

    // fill, skip slices that are already full
    while (bufWritten < bufSize) {
        Ipc::StoreMap::Slice &slice = store.nextAppendableSlice(fileNo, lastSlice);
        if (firstSlice < 0)
            firstSlice = lastSlice;
        copyToShmSlice(slice);
    }

    debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
}
/// copies at most one slice worth of buffer to shared memory
void
ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
    debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
           page);

    Must(bufWritten <= bufSize);
    const int64_t writingDebt = bufSize - bufWritten;
    const int64_t pageSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = totalWritten % pageSize;
    const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
    memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
           copySize);

    debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
           entry << " from " << sliceOffset << " in " << page);

    slice.size += copySize;
    bufWritten += copySize;
    totalWritten += copySize;
    // fresh anchor.basics.swap_file_sz is already set [to the stale value]

    // either we wrote everything or we filled the entire slice
    Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
}
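
// A worked example of the arithmetic above, assuming a 32 KiB page size:
// with totalWritten == 40000, the writer is 7232 bytes into its second slice
// (sliceOffset == 40000 % 32768 == 7232), so one copyToShmSlice() call copies
// at most 32768 - 7232 == 25536 bytes before a new slice must be allocated.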
MemStore::MemStore(): map(nullptr), lastWritingSlice(-1)
{
}

MemStore::~MemStore()
{
    delete map;
}
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no shared memory cache configured or a misconfiguration

    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }

    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));

    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        if (slotLimit > 0) {
            const unsigned int slotsFree =
                Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
            if (slotsFree <= static_cast<unsigned int>(slotLimit)) {
                const int usedSlots = slotLimit - static_cast<int>(slotsFree);
                storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                                  usedSlots, (100.0 * usedSlots / slotLimit));
            }

            if (slotLimit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}
uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}

uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}
void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return true;
}
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return nullptr;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return nullptr;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    try {
        // XXX: We do not know the URLs yet, only the key, but we need to parse and
        // store the response for the Root().find() callers to be happy because they
        // expect IN_MEMORY entries to already have the response headers and body.
        e->createMemObject();

        anchorEntry(*e, index, *slot);

        // TODO: make copyFromShm() throw on all failures, simplifying this code
        if (copyFromShm(*e, index, *slot))
            return e;
        debugs(20, 3, "failed for " << *e);
    } catch (...) {
        // see store_client::parseHttpHeadersFromDisk() for problems this may log
        debugs(20, DBG_IMPORTANT, "ERROR: Cannot load a cache hit from shared memory" <<
               Debug::Extra << "exception: " << CurrentException <<
               Debug::Extra << "cache_mem entry: " << *e);
    }

    map->freeEntry(index); // do not let others into the same trap
    destroyStoreEntry(static_cast<hash_link *>(e));
    return nullptr;
}
void
MemStore::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    assert(updatedE);
    assert(updatedE->mem_obj);
    if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
        return;

    try {
        updateHeadersOrThrow(update);
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update);
    }
}
void
MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
{
    // our +/- hdr_sz math below does not work if the chains differ [in size]
    Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);

    const uint64_t staleHdrSz = update.entry->mem().baseReply().hdr_sz;
    debugs(20, 7, "stale hdr_sz: " << staleHdrSz);

    /* we will need to copy same-slice payload after the stored headers later */
    Must(staleHdrSz > 0);
    update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
    Must(update.stale.splicingPoint >= 0);
    Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);

    Must(update.stale.anchor);
    ShmWriter writer(*this, update.entry, update.fresh.fileNo);
    update.entry->mem().freshestReply().packHeadersUsingSlowPacker(writer);
    const uint64_t freshHdrSz = writer.totalWritten;
    debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));

    /* copy same-slice payload remaining after the stored headers */
    const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
    const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
    const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
    Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
    Must(slice.size >= headersInLastSlice);
    const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
    const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
    char *page = static_cast<char*>(PagePointer(extra.page));
    debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
    writer.append(page + headersInLastSlice, payloadInLastSlice);
    update.fresh.splicingPoint = writer.lastSlice;

    update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
    update.fresh.anchor->basics.swap_file_sz += freshHdrSz;

    map->closeForUpdating(update);
}
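
// Note the swap_file_sz adjustment above: the fresh anchor inherited the
// stale size (see the Must() at the top), so subtracting staleHdrSz and
// adding freshHdrSz turns (stale header size + body size) into (fresh header
// size + body size) without ever computing the body size itself.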
void
MemStore::anchorToCache(StoreEntry &entry)
{
    Assure(!entry.hasMemStore());
    Assure(entry.mem().memCache.io != MemObject::ioDone);

    if (!map)
        throw TextException("no shared memory cache", Here());

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(entry.key), index);
    if (!slot)
        throw TextException("missing mem-cache entry", Here());

    anchorEntry(entry, index, *slot);
    if (!updateAnchoredWith(entry, index, *slot))
        throw TextException("updateAnchoredWith() failure", Here());
}
bool
MemStore::updateAnchored(StoreEntry &entry)
{
    if (!map)
        return false;

    assert(entry.mem_obj);
    assert(entry.hasMemStore());
    const sfileno index = entry.mem_obj->memCache.index;
    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateAnchoredWith(entry, index, anchor);
}
/// updates Transients entry after its anchor has been located
bool
MemStore::updateAnchoredWith(StoreEntry &entry, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    entry.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(entry, index, anchor);
    return copied;
}
/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    assert(!e.hasDisk()); // no conflict with disk entry basics
    anchor.exportInto(e);

    assert(e.mem_obj);
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }

    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}
/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;

    SBuf httpHeaderParsingBuffer;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;

        debugs(20, 8, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());

        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);

            copyFromShmSlice(e, sliceBuf);
            debugs(20, 8, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);

            // parse headers if needed; they might span multiple slices!
            if (!e.hasParsedReplyHeader()) {
                httpHeaderParsingBuffer.append(sliceBuf.data, sliceBuf.length);
                auto &reply = e.mem().adjustableBaseReply();
                if (reply.parseTerminatedPrefix(httpHeaderParsingBuffer.c_str(), httpHeaderParsingBuffer.length()))
                    httpHeaderParsingBuffer = SBuf(); // we do not need these bytes anymore
            }
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }

    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }

    if (anchor.writerHalted) {
        debugs(20, 5, "mem-loaded aborted " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return false;
    }

    debugs(20, 5, "mem-loaded all " << e.mem_obj->endOffset() << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    if (!e.hasParsedReplyHeader())
        throw TextException(ToSBuf("truncated mem-cached headers; accumulated: ", httpHeaderParsingBuffer.length()), Here());

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    // the map slot
    disconnect(e);
    return true;
}
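
// The loop above tolerates a concurrently appending writer: it snapshots
// slice.size before copying and only advances to slice.next (or stops) when
// that snapshot is provably final, so a slice that grew mid-copy is simply
// revisited on the next iteration.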
/// imports one shared memory slice into local memory
void
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);

    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
}
/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (shutting_down) {
        debugs(20, 5, "avoid heavy optional work during shutdown: " << e);
        return false;
    }

    // To avoid SMP workers releasing each other caching attempts, restrict disk
    // caching to StoreEntry publisher. This check goes before memoryCachable()
    // that may incorrectly release() publisher's entry via checkCachable().
    if (Store::Root().transientsReader(e)) {
        debugs(20, 5, "yield to entry publisher: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (!e.mem_obj->vary_headers.isEmpty()) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);
    if (ramSize > maxObjectSize()) {
        debugs(20, 5, "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}
/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    // Do not allow others to feed off an unknown-size entry because we will
    // stop swapping it out if it grows too large.
    if (e.mem_obj->expectedReplySize() >= 0)
        map->startAppending(index);
    e.memOutDecision(true);
    return true;
}
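
// Only known-size entries are published to collapsed readers right away (the
// startAppending() call above); an unknown-size entry stays unshared because
// its caching may still be aborted if it outgrows maxObjectSize().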
/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    assert(map);
    assert(e.mem_obj);
    Must(!EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT));

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }

    // throw if an accepted unknown-size entry grew too big or max-size changed
    Must(eSize <= maxObjectSize());

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
    lastWritingSlice = anchor.start;

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice = nextAppendableSlice(
                                          e.mem_obj->memCache.index, lastWritingSlice);
        if (anchor.start < 0)
            anchor.start = lastWritingSlice;
        copyToShmSlice(e, anchor, slice);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}
/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);

    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}
/// starts checking with the entry chain slice at a given offset and
/// returns a not-full (but not necessarily empty) slice, updating sliceOffset
Ipc::StoreMap::Slice &
MemStore::nextAppendableSlice(const sfileno fileNo, sfileno &sliceOffset)
{
    // allocate the very first slot for the entry if needed
    if (sliceOffset < 0) {
        Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
        Must(anchor.start < 0);
        Ipc::Mem::PageId page;
        sliceOffset = reserveSapForWriting(page); // throws
        extras->items[sliceOffset].page = page;
        anchor.start = sliceOffset;
    }

    const size_t sliceCapacity = Ipc::Mem::PageSize();
    do {
        Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                sliceOffset = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = sliceOffset = reserveSapForWriting(page);
            extras->items[sliceOffset].page = page;
            debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
            continue; // to get and return the slice at the new sliceOffset
        }

        return slice;
    } while (true);
    /* not reached */
}
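
// The do/while loop above always terminates: each iteration either returns a
// non-full slice, follows an existing slice.next link, or appends a fresh
// (empty) slice that the next iteration returns.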
/// safely returns a previously allocated memory page for the given entry slice
Ipc::Mem::PageId
MemStore::pageForSlice(Ipc::StoreMapSliceId sliceId)
{
    Must(extras);
    Must(sliceId >= 0);
    Ipc::Mem::PageId page = extras->items[sliceId].page;
    Must(page);
    return page;
}
/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        const auto slotId = slot.number - 1;
        debugs(20, 5, "got a previously free slot: " << slotId);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            map->prepFreeSlice(slotId);
            return slotId;
        }

        debugs(20, 3, "but there is no free page, returning " << slotId);
        freeSlots->push(slot);
    }

    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        const auto slotId = slot.number - 1;
        map->prepFreeSlice(slotId);
        debugs(20, 5, "got previously busy " << slotId << " and " << page);
        return slotId;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = nullptr;
    waitingFor.page = nullptr;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}
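
// Design note: map->purgeOne() frees a victim entry synchronously, and each
// freed slice is reported to noteFreeMapSlice() below. Parking &slot/&page in
// waitingFor beforehand lets that callback hand the freed slot and its page
// directly to this reserveSapForWriting() call instead of the shared pools.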
void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = Ipc::Mem::PageStack::IdForMemStoreSpace();
    slotId.number = sliceId + 1;
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = nullptr;
        waitingFor.page = nullptr;
        pageId = Ipc::Mem::PageId();
    }
}
void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);
        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}
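
// write() acts as a small per-entry state machine keyed on memCache.io:
// ioUndecided picks a direction exactly once (via shouldCache() and
// startCaching()), ioWriting keeps copying newly arrived content, and
// ioDone or ioReading turn further calls into no-ops.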
void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index);

    CollapsedForwarding::Broadcast(e);
    e.storeWriterDone();
}
void
MemStore::evictCached(StoreEntry &e)
{
    debugs(20, 7, e);
    if (e.hasMemStore()) {
        if (map->freeEntry(e.mem_obj->memCache.index))
            CollapsedForwarding::Broadcast(e);
        if (!e.locked()) {
            disconnect(e);
            e.destroyMemObject();
        }
    } else if (const auto key = e.publicKey()) {
        // the entry may have been loaded and then disconnected from the cache
        evictIfFound(key);
        if (!e.locked())
            e.destroyMemObject();
    }
}
void
MemStore::evictIfFound(const cache_key *key)
{
    if (map)
        map->freeEntryByKey(key);
}
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (e.hasMemStore()) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            CollapsedForwarding::Broadcast(e);
            e.storeWriterDone();
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}
bool
MemStore::Requested()
{
    return Config.memShared && Config.memMaxSize > 0;
}
/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Requested())
        return 0;

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}
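
// Example of the division above, assuming a 32 KiB shared page size: a
// 256 MB cache_mem yields 268435456 / 32768 == 8192 entries, i.e. one map
// anchor (and one extras item) per potential page.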
/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(nullptr), mapOwner(nullptr), extrasOwner(nullptr) {}
    void finalizeConfig() override;
    void claimMemoryNeeds() override;
    void useConfig() override;
    ~MemStoreRr() override;

protected:
    /* Ipc::Mem::RegisteredRunner API */
    void create() override;

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

DefineRunnerRegistrator(MemStoreRr);
void
MemStoreRr::claimMemoryNeeds()
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}
void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }

    if (MemStore::Requested() && Config.memMaxSize < Ipc::Mem::PageSize()) {
        debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small (" <<
               (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
               (Ipc::Mem::PageSize() / 1024.0) << " KB");
    }
}
void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
void
MemStoreRr::create()
{
    if (!MemStore::Enabled())
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    assert(entryLimit > 0);

    Ipc::Mem::PageStack::Config spaceConfig;
    spaceConfig.poolId = Ipc::Mem::PageStack::IdForMemStoreSpace();
    spaceConfig.pageSize = 0; // the pages are stored in Ipc::Mem::Pages
    spaceConfig.capacity = entryLimit;
    spaceConfig.createFull = true; // all pages are initially available
    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, spaceConfig);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete extrasOwner;
    delete mapOwner;
    delete spaceOwner;
}