/*
 * Copyright (C) 1996-2018 The Squid Software Foundation and contributors
 *
 * Squid software is distributed under GPLv2+ license and includes
 * contributions from numerous individuals and organizations.
 * Please see the COPYING and CONTRIBUTORS files for details.
 */
/* DEBUG: section 20    Memory Cache */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "CollapsedForwarding.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "mime_header.h"
#include "SquidConfig.h"
#include "SquidMath.h"
#include "StoreStats.h"
#include "tools.h"
/// shared memory segment path to use for MemStore maps
static const SBuf MapLabel("cache_mem_map");
/// shared memory segment path to use for the free slices index
static const char *SpaceLabel = "cache_mem_space";
/// shared memory segment path to use for IDs of shared pages with slice data
static const char *ExtrasLabel = "cache_mem_ex";
// TODO: sync with Rock::SwapDir::*Path()
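// Note (assumption, naming is platform-dependent): on Linux these labels
// typically surface as /dev/shm/squid-<label>.shm segments, which can help
// when inspecting a running cache from the outside.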
// We store free slot IDs (i.e., "space") as Page objects so that we can use
// Ipc::Mem::PageStack. Pages require pool IDs. The value here is not really
// used except for a positivity test. A unique value is handy for debugging.
static const uint32_t SpacePoolId = 510716;
/// Packs to shared memory, allocating new slots/pages as needed.
/// Requires an Ipc::StoreMapAnchor locked for writing.
class ShmWriter: public Packable
{
public:
    ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo,
              Ipc::StoreMapSliceId aFirstSlice = -1);

    /* Packable API */
    virtual void append(const char *aBuf, int aSize) override;
    virtual void vappendf(const char *fmt, va_list ap) override;
public:
    StoreEntry *entry; ///< the entry being updated

    /// the slot keeping the first byte of the appended content (at least)
    /// either set via constructor parameter or allocated by the first append
    Ipc::StoreMapSliceId firstSlice;

    /// the slot keeping the last byte of the appended content (at least)
    Ipc::StoreMapSliceId lastSlice;

    uint64_t totalWritten; ///< cumulative number of bytes appended so far
protected:
    void copyToShm();
    void copyToShmSlice(Ipc::StoreMap::Slice &slice);
private:
    MemStore &store;
    const sfileno fileNo;

    /* set by (and only valid during) append calls */
    const char *buf; ///< content being appended now
    int bufSize; ///< buf size
    int bufWritten; ///< buf bytes appended so far
};
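// ShmWriter is used by MemStore::updateHeadersOrThrow() below: the fresh
// response headers are packed into the entry's shared-memory slice chain via
// HttpReply::packHeadersInto(&writer).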
ShmWriter::ShmWriter(MemStore &aStore, StoreEntry *anEntry, const sfileno aFileNo, Ipc::StoreMapSliceId aFirstSlice):
    entry(anEntry),
    firstSlice(aFirstSlice),
    lastSlice(firstSlice),
    totalWritten(0),
    store(aStore),
    fileNo(aFileNo),
    buf(NULL),
    bufSize(0),
    bufWritten(0)
{
    Must(entry);
}
void
ShmWriter::append(const char *aBuf, int aBufSize)
void
ShmWriter::vappendf(const char *fmt, va_list ap)
{
    SBuf vaBuf;
#if defined(VA_COPY)
    va_list apCopy;
    VA_COPY(apCopy, ap);
    vaBuf.vappendf(fmt, apCopy);
    va_end(apCopy);
#else
    vaBuf.vappendf(fmt, ap);
#endif
    append(vaBuf.rawContent(), vaBuf.length());
}
/// copies the entire buffer to shared memory
void
ShmWriter::copyToShm()
{
    Must(bufSize > 0); // do not use up shared memory pages for nothing
    Must(firstSlice < 0 || lastSlice >= 0);
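    // i.e., either the writer has not allocated any slices yet (firstSlice
    // is negative) or it knows both ends of its existing chain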
    // fill, skip slices that are already full
    while (bufWritten < bufSize) {
        Ipc::StoreMap::Slice &slice = store.nextAppendableSlice(fileNo, lastSlice);
        if (firstSlice < 0)
            firstSlice = lastSlice;
        copyToShmSlice(slice);
    }

    debugs(20, 7, "stored " << bufWritten << '/' << totalWritten << " header bytes of " << *entry);
}
/// copies at most one slice worth of buffer to shared memory
void
ShmWriter::copyToShmSlice(Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = store.pageForSlice(lastSlice);
    debugs(20, 7, "entry " << *entry << " slice " << lastSlice << " has " <<
           page);

    Must(bufWritten <= bufSize);
    const int64_t writingDebt = bufSize - bufWritten;
    const int64_t pageSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = totalWritten % pageSize;
    const int64_t copySize = std::min(writingDebt, pageSize - sliceOffset);
    memcpy(static_cast<char*>(PagePointer(page)) + sliceOffset, buf + bufWritten,
           copySize);
    debugs(20, 7, "copied " << slice.size << '+' << copySize << " bytes of " <<
           entry << " from " << sliceOffset << " in " << page);

    slice.size += copySize;
    bufWritten += copySize;
    totalWritten += copySize;
    // fresh anchor.basics.swap_file_sz is already set [to the stale value]

    // either we wrote everything or we filled the entire slice
    Must(bufWritten == bufSize || sliceOffset + copySize == pageSize);
}
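// Worked example (assuming a hypothetical 32 KB page size): appending 40 KB
// of headers from totalWritten = 0 copies 32 KB into the first slice, filling
// it exactly (sliceOffset + copySize == pageSize); the copyToShm() loop then
// calls this method again to copy the remaining 8 KB into a fresh slice,
// ending with bufWritten == bufSize.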
MemStore::MemStore(): map(NULL), lastWritingSlice(-1)
{
}
MemStore::~MemStore()
{
    delete map;
}
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration
    // check compatibility with the disk cache, if any
    if (Config.cacheSwap.n_configured > 0) {
        const int64_t diskMaxSize = Store::Root().maxObjectSize();
        const int64_t memMaxSize = maxObjectSize();
        if (diskMaxSize == -1) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is unlimited but mem-cache maximum object size is " <<
                   memMaxSize / 1024.0 << " KB");
        } else if (diskMaxSize > memMaxSize) {
            debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
                   "is too large for mem-cache: " <<
                   diskMaxSize / 1024.0 << " KB > " <<
                   memMaxSize / 1024.0 << " KB");
        }
    }
    freeSlots = shm_old(Ipc::Mem::PageStack)(SpaceLabel);
    extras = shm_old(Extras)(ExtrasLabel);

    Must(!map);
    map = new MemStoreMap(MapLabel);
    map->cleaner = this;
}
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", maxSize()/1024.0);
    storeAppendPrintf(&e, "Current Size: %.2f KB %.2f%%\n",
                      currentSize() / 1024.0,
                      Math::doublePercent(currentSize(), maxSize()));
    if (map) {
        const int entryLimit = map->entryLimit();
        const int slotLimit = map->sliceLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", entryLimit);
        if (entryLimit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / entryLimit));
        }

        storeAppendPrintf(&e, "Maximum slots: %9d\n", slotLimit);
        const unsigned int slotsFree =
            Ipc::Mem::PagesAvailable(Ipc::Mem::PageId::cachePage);
        if (slotsFree <= static_cast<const unsigned int>(slotLimit)) {
            const int usedSlots = slotLimit - static_cast<const int>(slotsFree);
            storeAppendPrintf(&e, "Used slots: %9d %.2f%%\n",
                              usedSlots, (100.0 * usedSlots / slotLimit));
        }

        if (slotLimit < 100) { // XXX: otherwise too expensive to count
            Ipc::ReadWriteLockStats stats;
            map->updateStats(stats);
            stats.dump(e);
        }
    }
}
uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
uint64_t
MemStore::maxSize() const
{
    return Config.memMaxSize;
}
uint64_t
MemStore::currentSize() const
{
    return Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) *
           Ipc::Mem::PageSize();
}
uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}
int64_t
MemStore::maxObjectSize() const
{
    return min(Config.Store.maxInMemObjSize, Config.memMaxSize);
}
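// i.e., the per-object ceiling is maximum_object_size_in_memory, further
// capped by the total cache_mem size when the latter is smaller.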
void
MemStore::reference(StoreEntry &)
{
}
bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return true;
}
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    e->createMemObject();

    anchorEntry(*e, index, *slot);

    const bool copied = copyFromShm(*e, index, *slot);

    if (copied)
        return e;

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->freeEntry(index); // do not let others into the same trap
    return NULL;
}
void
MemStore::updateHeaders(StoreEntry *updatedE)
{
    if (!map)
        return;

    Ipc::StoreMapUpdate update(updatedE);
    assert(updatedE);
    assert(updatedE->mem_obj);
    if (!map->openForUpdating(update, updatedE->mem_obj->memCache.index))
        return;

    try {
        updateHeadersOrThrow(update);
    } catch (const std::exception &ex) {
        debugs(20, 2, "error starting to update entry " << *updatedE << ": " << ex.what());
        map->abortUpdating(update);
    }
}
void
MemStore::updateHeadersOrThrow(Ipc::StoreMapUpdate &update)
{
    // our +/- hdr_sz math below does not work if the chains differ [in size]
    Must(update.stale.anchor->basics.swap_file_sz == update.fresh.anchor->basics.swap_file_sz);

    const HttpReply *rawReply = update.entry->getReply();
    Must(rawReply);
    const HttpReply &reply = *rawReply;
    const uint64_t staleHdrSz = reply.hdr_sz;
    debugs(20, 7, "stale hdr_sz: " << staleHdrSz);

    /* we will need to copy same-slice payload after the stored headers later */
    Must(staleHdrSz > 0);
    update.stale.splicingPoint = map->sliceContaining(update.stale.fileNo, staleHdrSz);
    Must(update.stale.splicingPoint >= 0);
    Must(update.stale.anchor->basics.swap_file_sz >= staleHdrSz);

    Must(update.stale.anchor);
    ShmWriter writer(*this, update.entry, update.fresh.fileNo);
    reply.packHeadersInto(&writer);
    const uint64_t freshHdrSz = writer.totalWritten;
    debugs(20, 7, "fresh hdr_sz: " << freshHdrSz << " diff: " << (freshHdrSz - staleHdrSz));
    /* copy same-slice payload remaining after the stored headers */
    const Ipc::StoreMapSlice &slice = map->readableSlice(update.stale.fileNo, update.stale.splicingPoint);
    const Ipc::StoreMapSlice::Size sliceCapacity = Ipc::Mem::PageSize();
    const Ipc::StoreMapSlice::Size headersInLastSlice = staleHdrSz % sliceCapacity;
    Must(headersInLastSlice > 0); // or sliceContaining() would have stopped earlier
    Must(slice.size >= headersInLastSlice);
    const Ipc::StoreMapSlice::Size payloadInLastSlice = slice.size - headersInLastSlice;
    const MemStoreMapExtras::Item &extra = extras->items[update.stale.splicingPoint];
    char *page = static_cast<char*>(PagePointer(extra.page));
    debugs(20, 5, "appending same-slice payload: " << payloadInLastSlice);
    writer.append(page + headersInLastSlice, payloadInLastSlice);
    update.fresh.splicingPoint = writer.lastSlice;

    update.fresh.anchor->basics.swap_file_sz -= staleHdrSz;
    update.fresh.anchor->basics.swap_file_sz += freshHdrSz;
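    // i.e., fresh total = stale total - stale header size + fresh header
    // size; the same-slice payload bytes appended above were already counted
    // in the stale swap_file_sz, so only the header delta is applied here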
    map->closeForUpdating(update);
}
bool
MemStore::anchorCollapsed(StoreEntry &collapsed, bool &inSync)
{
    if (!map)
        return false;

    sfileno index;
    const Ipc::StoreMapAnchor *const slot = map->openForReading(
            reinterpret_cast<cache_key*>(collapsed.key), index);
    if (!slot)
        return false;

    anchorEntry(collapsed, index, *slot);
    inSync = updateCollapsedWith(collapsed, index, *slot);
    return true; // even if inSync is false
}
bool
MemStore::updateCollapsed(StoreEntry &collapsed)
{
    assert(collapsed.mem_obj);

    const sfileno index = collapsed.mem_obj->memCache.index;

    // already disconnected from the cache, no need to update
    if (index < 0)
        return true;

    if (!map)
        return false;

    const Ipc::StoreMapAnchor &anchor = map->readableEntry(index);
    return updateCollapsedWith(collapsed, index, anchor);
}
/// updates collapsed entry after its anchor has been located
bool
MemStore::updateCollapsedWith(StoreEntry &collapsed, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    collapsed.swap_file_sz = anchor.basics.swap_file_sz;
    const bool copied = copyFromShm(collapsed, index, anchor);
    return copied;
}
/// anchors StoreEntry to an already locked map entry
void
MemStore::anchorEntry(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    const Ipc::StoreMapAnchor::Basics &basics = anchor.basics;

    e.swap_file_sz = basics.swap_file_sz;
    e.lastref = basics.lastref;
    e.timestamp = basics.timestamp;
    e.expires = basics.expires;
    e.lastModified(basics.lastmod);
    e.refcount = basics.refcount;
    e.flags = basics.flags;
    if (anchor.complete()) {
        e.store_status = STORE_OK;
        e.mem_obj->object_sz = e.swap_file_sz;
        e.setMemStatus(IN_MEMORY);
    } else {
        e.store_status = STORE_PENDING;
        assert(e.mem_obj->object_sz < 0);
        e.setMemStatus(NOT_IN_MEMORY);
    }
    assert(e.swap_status == SWAPOUT_NONE); // set in StoreEntry constructor
    e.ping_status = PING_NONE;

    EBIT_CLR(e.flags, RELEASE_REQUEST);
    EBIT_SET(e.flags, ENTRY_VALIDATED);

    MemObject::MemCache &mc = e.mem_obj->memCache;
    mc.index = index;
    mc.io = MemObject::ioReading;
}
/// copies the entire entry from shared to local memory
bool
MemStore::copyFromShm(StoreEntry &e, const sfileno index, const Ipc::StoreMapAnchor &anchor)
{
    debugs(20, 7, "mem-loading entry " << index << " from " << anchor.start);
    assert(e.mem_obj);
    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    Ipc::StoreMapSliceId sid = anchor.start; // optimize: remember the last sid
    bool wasEof = anchor.complete() && sid < 0;
    int64_t sliceOffset = 0;
    while (sid >= 0) {
        const Ipc::StoreMapSlice &slice = map->readableSlice(index, sid);
        // slice state may change during copying; take snapshots now
        wasEof = anchor.complete() && slice.next < 0;
        const Ipc::StoreMapSlice::Size wasSize = slice.size;
        debugs(20, 9, "entry " << index << " slice " << sid << " eof " <<
               wasEof << " wasSize " << wasSize << " <= " <<
               anchor.basics.swap_file_sz << " sliceOffset " << sliceOffset <<
               " mem.endOffset " << e.mem_obj->endOffset());
        if (e.mem_obj->endOffset() < sliceOffset + wasSize) {
            // size of the slice data that we already copied
            const size_t prefixSize = e.mem_obj->endOffset() - sliceOffset;
            assert(prefixSize <= wasSize);

            const MemStoreMapExtras::Item &extra = extras->items[sid];

            char *page = static_cast<char*>(PagePointer(extra.page));
            const StoreIOBuffer sliceBuf(wasSize - prefixSize,
                                         e.mem_obj->endOffset(),
                                         page + prefixSize);
            if (!copyFromShmSlice(e, sliceBuf, wasEof))
                return false;
            debugs(20, 9, "entry " << index << " copied slice " << sid <<
                   " from " << extra.page << '+' << prefixSize);
        }
        // else skip a [possibly incomplete] slice that we copied earlier

        // careful: the slice may have grown _and_ gotten the next slice ID!
        if (slice.next >= 0) {
            assert(!wasEof);
            // here we know that slice.size may not change any more
            if (wasSize >= slice.size) { // did not grow since we started copying
                sliceOffset += wasSize;
                sid = slice.next;
            }
        } else if (wasSize >= slice.size) { // did not grow
            break;
        }
    }
    if (!wasEof) {
        debugs(20, 7, "mem-loaded " << e.mem_obj->endOffset() << '/' <<
               anchor.basics.swap_file_sz << " bytes of " << e);
        return true;
    }
    debugs(20, 7, "mem-loaded all " << e.mem_obj->object_sz << '/' <<
           anchor.basics.swap_file_sz << " bytes of " << e);

    // from StoreEntry::complete()
    e.mem_obj->object_sz = e.mem_obj->endOffset();
    e.store_status = STORE_OK;
    e.setMemStatus(IN_MEMORY);

    assert(e.mem_obj->object_sz >= 0);
    assert(static_cast<uint64_t>(e.mem_obj->object_sz) == anchor.basics.swap_file_sz);
    // would be nice to call validLength() here, but it needs e.key

    // we read the entire response into the local memory; no more need to lock
    disconnect(e);
    return true;
}
/// imports one shared memory slice into local memory
bool
MemStore::copyFromShmSlice(StoreEntry &e, const StoreIOBuffer &buf, bool eof)
{
    debugs(20, 7, "buf: " << buf.offset << " + " << buf.length);
    // from store_client::readBody()
    // parse headers if needed; they might span multiple slices!
    HttpReply *rep = (HttpReply *)e.getReply();
    if (rep->pstate < Http::Message::psParsed) {
        // XXX: have to copy because httpMsgParseStep() requires 0-termination
        MemBuf mb;
        mb.init(buf.length+1, buf.length+1);
        mb.append(buf.data, buf.length);
        mb.terminate();

        const int result = rep->httpMsgParseStep(mb.buf, buf.length, eof);
        if (result > 0) {
            assert(rep->pstate == Http::Message::psParsed);
            EBIT_CLR(e.flags, ENTRY_FWD_HDR_WAIT);
        } else if (result < 0) {
            debugs(20, DBG_IMPORTANT, "Corrupted mem-cached headers: " << e);
            return false;
        } else { // more slices are needed
            assert(!eof);
        }
    }
    debugs(20, 7, "rep pstate: " << rep->pstate);
    // local memory stores both headers and body so copy regardless of pstate
    const int64_t offBefore = e.mem_obj->endOffset();
    assert(e.mem_obj->data_hdr.write(buf)); // from MemObject::write()
    const int64_t offAfter = e.mem_obj->endOffset();
    // expect to write the entire buf because StoreEntry::write() never fails
    assert(offAfter >= 0 && offBefore <= offAfter &&
           static_cast<size_t>(offAfter - offBefore) == buf.length);
    return true;
}
/// whether we should cache the entry
bool
MemStore::shouldCache(StoreEntry &e) const
{
    if (e.mem_status == IN_MEMORY) {
        debugs(20, 5, "already loaded from mem-cache: " << e);
        return false;
    }

    if (e.mem_obj && e.mem_obj->memCache.offset > 0) {
        debugs(20, 5, "already written to mem-cache: " << e);
        return false;
    }

    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);

    if (!e.mem_obj->vary_headers.isEmpty()) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return false;
    }

    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t ramSize = max(loadedSize, expectedSize);
    if (ramSize > maxObjectSize()) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!e.mem_obj->isContiguous()) {
        debugs(20, 5, "not contiguous");
        return false;
    }

    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return false;
    }

    if (EBIT_TEST(e.flags, ENTRY_SPECIAL)) {
        debugs(20, 5, "Not mem-caching ENTRY_SPECIAL " << e);
        return false;
    }

    return true;
}
/// locks map anchor and preps to store the entry in shared memory
bool
MemStore::startCaching(StoreEntry &e)
{
    sfileno index = 0;
    Ipc::StoreMapAnchor *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return false;
    }

    assert(e.mem_obj);
    e.mem_obj->memCache.index = index;
    e.mem_obj->memCache.io = MemObject::ioWriting;
    slot->set(e);
    // Do not allow others to feed off an unknown-size entry because we will
    // stop swapping it out if it grows too large.
    if (e.mem_obj->expectedReplySize() >= 0)
        map->startAppending(index);
    e.memOutDecision(true);
    return true;
}
/// copies all local data to shared memory
void
MemStore::copyToShm(StoreEntry &e)
{
    // prevents remote readers from getting ENTRY_FWD_HDR_WAIT entries and
    // not knowing when the wait is over
    if (EBIT_TEST(e.flags, ENTRY_FWD_HDR_WAIT)) {
        debugs(20, 5, "postponing copying " << e << " for ENTRY_FWD_HDR_WAIT");
        return;
    }

    assert(map);
    assert(e.mem_obj);

    const int64_t eSize = e.mem_obj->endOffset();
    if (e.mem_obj->memCache.offset >= eSize) {
        debugs(20, 5, "postponing copying " << e << " for lack of news: " <<
               e.mem_obj->memCache.offset << " >= " << eSize);
        return; // nothing to do (yet)
    }
    // throw if an accepted unknown-size entry grew too big or max-size changed
    Must(eSize <= maxObjectSize());

    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    Ipc::StoreMapAnchor &anchor = map->writeableEntry(index);
    lastWritingSlice = anchor.start;

    // fill, skip slices that are already full
    // Optimize: remember lastWritingSlice in e.mem_obj
    while (e.mem_obj->memCache.offset < eSize) {
        Ipc::StoreMap::Slice &slice = nextAppendableSlice(
                                          e.mem_obj->memCache.index, lastWritingSlice);
        if (anchor.start < 0)
            anchor.start = lastWritingSlice;
        copyToShmSlice(e, anchor, slice);
    }

    debugs(20, 7, "mem-cached available " << eSize << " bytes of " << e);
}
/// copies at most one slice worth of local memory to shared memory
void
MemStore::copyToShmSlice(StoreEntry &e, Ipc::StoreMapAnchor &anchor, Ipc::StoreMap::Slice &slice)
{
    Ipc::Mem::PageId page = pageForSlice(lastWritingSlice);
    debugs(20, 7, "entry " << e << " slice " << lastWritingSlice << " has " <<
           page);

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t sliceOffset = e.mem_obj->memCache.offset % bufSize;
    StoreIOBuffer sharedSpace(bufSize - sliceOffset, e.mem_obj->memCache.offset,
                              static_cast<char*>(PagePointer(page)) + sliceOffset);
    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (copied <= 0) {
        debugs(20, 2, "Failed to mem-cache " << (bufSize - sliceOffset) <<
               " bytes of " << e << " from " << e.mem_obj->memCache.offset <<
               " in " << page);
        throw TexcHere("data_hdr.copy failure");
    }

    debugs(20, 7, "mem-cached " << copied << " bytes of " << e <<
           " from " << e.mem_obj->memCache.offset << " in " << page);
    slice.size += copied;
    e.mem_obj->memCache.offset += copied;
    anchor.basics.swap_file_sz = e.mem_obj->memCache.offset;
}
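// Note: swap_file_sz of a still-growing entry tracks the bytes copied so far;
// once the entry is complete, readers expect it to match mem_obj->object_sz
// (see the assertions in copyFromShm() above).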
/// starts checking with the entry chain slice at a given offset and
/// returns a not-full (but not necessarily empty) slice, updating sliceOffset
Ipc::StoreMap::Slice &
MemStore::nextAppendableSlice(const sfileno fileNo, sfileno &sliceOffset)
{
    // allocate the very first slot for the entry if needed
    if (sliceOffset < 0) {
        Ipc::StoreMapAnchor &anchor = map->writeableEntry(fileNo);
        Must(anchor.start < 0);
        Ipc::Mem::PageId page;
        sliceOffset = reserveSapForWriting(page); // throws
        extras->items[sliceOffset].page = page;
        anchor.start = sliceOffset;
    }
    const size_t sliceCapacity = Ipc::Mem::PageSize();
    do {
        Ipc::StoreMap::Slice &slice = map->writeableSlice(fileNo, sliceOffset);

        if (slice.size >= sliceCapacity) {
            if (slice.next >= 0) {
                sliceOffset = slice.next;
                continue;
            }

            Ipc::Mem::PageId page;
            slice.next = sliceOffset = reserveSapForWriting(page);
            extras->items[sliceOffset].page = page;
            debugs(20, 7, "entry " << fileNo << " new slice: " << sliceOffset);
            continue; // to get and return the slice at the new sliceOffset
        }

        return slice;
    } while (true);
    /* not reached */
}
/// safely returns a previously allocated memory page for the given entry slice
Ipc::Mem::PageId
MemStore::pageForSlice(Ipc::StoreMapSliceId sliceId)
{
    Must(extras);
    Must(sliceId >= 0);
    Ipc::Mem::PageId page = extras->items[sliceId].page;
    Must(page);
    return page;
}
/// finds a slot and a free page to fill or throws
sfileno
MemStore::reserveSapForWriting(Ipc::Mem::PageId &page)
{
    Ipc::Mem::PageId slot;
    if (freeSlots->pop(slot)) {
        debugs(20, 5, "got a previously free slot: " << slot);

        if (Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
            debugs(20, 5, "and got a previously free page: " << page);
            return slot.number - 1;
        }

        debugs(20, 3, "but there is no free page, returning " << slot);
        freeSlots->push(slot);
    }
    // catch free slots delivered to noteFreeMapSlice()
    assert(!waitingFor);
    waitingFor.slot = &slot;
    waitingFor.page = &page;
    if (map->purgeOne()) {
        assert(!waitingFor); // noteFreeMapSlice() should have cleared it
        assert(slot.set());
        assert(page.set());
        debugs(20, 5, "got previously busy " << slot << " and " << page);
        return slot.number - 1;
    }
    assert(waitingFor.slot == &slot && waitingFor.page == &page);
    waitingFor.slot = NULL;
    waitingFor.page = NULL;

    debugs(47, 3, "cannot get a slice; entries: " << map->entryCount());
    throw TexcHere("ran out of mem-cache slots");
}
void
MemStore::noteFreeMapSlice(const Ipc::StoreMapSliceId sliceId)
{
    Ipc::Mem::PageId &pageId = extras->items[sliceId].page;
    debugs(20, 9, "slice " << sliceId << " freed " << pageId);
    assert(pageId);
    Ipc::Mem::PageId slotId;
    slotId.pool = SpacePoolId;
    slotId.number = sliceId + 1;
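    // Slot IDs ride in PageId::number, where zero means "unset", so slice IDs
    // are stored off by one; reserveSapForWriting() undoes the shift with its
    // "slot.number - 1" return values above.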
    if (!waitingFor) {
        // must zero pageId before we give slice (and pageId extras!) to others
        Ipc::Mem::PutPage(pageId);
        freeSlots->push(slotId);
    } else {
        *waitingFor.slot = slotId;
        *waitingFor.page = pageId;
        waitingFor.slot = NULL;
        waitingFor.page = NULL;
        pageId = Ipc::Mem::PageId();
    }
}
void
MemStore::write(StoreEntry &e)
{
    assert(e.mem_obj);

    debugs(20, 7, "entry " << e);

    switch (e.mem_obj->memCache.io) {
    case MemObject::ioUndecided:
        if (!shouldCache(e) || !startCaching(e)) {
            e.mem_obj->memCache.io = MemObject::ioDone;
            e.memOutDecision(false);
            return;
        }
        break;

    case MemObject::ioDone:
    case MemObject::ioReading:
        return; // we should not write in all of the above cases

    case MemObject::ioWriting:
        break; // already decided to write and still writing
    }

    try {
        copyToShm(e);

        if (e.store_status == STORE_OK) // done receiving new content
            completeWriting(e);
        else
            CollapsedForwarding::Broadcast(e);
        return;
    } catch (const std::exception &x) { // TODO: should we catch ... as well?
        debugs(20, 2, "mem-caching error writing entry " << e << ": " << x.what());
        // fall through to the error handling code
    }

    disconnect(e);
}
void
MemStore::completeWriting(StoreEntry &e)
{
    assert(e.mem_obj);
    const int32_t index = e.mem_obj->memCache.index;
    assert(index >= 0);
    assert(map);

    debugs(20, 5, "mem-cached all " << e.mem_obj->memCache.offset << " bytes of " << e);

    e.mem_obj->memCache.index = -1;
    e.mem_obj->memCache.io = MemObject::ioDone;
    map->closeForWriting(index, false);

    CollapsedForwarding::Broadcast(e); // before we close our transient entry!
    Store::Root().transientsCompleteWriting(e);
}
void
MemStore::markForUnlink(StoreEntry &e)
{
    assert(e.mem_obj);
    if (e.mem_obj->memCache.index >= 0)
        map->freeEntry(e.mem_obj->memCache.index);
}
void
MemStore::unlink(StoreEntry &e)
{
    if (e.mem_obj && e.mem_obj->memCache.index >= 0) {
        map->freeEntry(e.mem_obj->memCache.index);
        disconnect(e);
    } else if (map) {
        // the entry may have been loaded and then disconnected from the cache
        map->freeEntryByKey(reinterpret_cast<cache_key*>(e.key));
        assert(!e.mem_obj);
    }

    e.destroyMemObject(); // XXX: but it may contain useful info such as a client list. The old code used to do that though, right?
}
void
MemStore::disconnect(StoreEntry &e)
{
    assert(e.mem_obj);
    MemObject &mem_obj = *e.mem_obj;
    if (mem_obj.memCache.index >= 0) {
        if (mem_obj.memCache.io == MemObject::ioWriting) {
            map->abortWriting(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
            Store::Root().transientsAbandon(e); // broadcasts after the change
        } else {
            assert(mem_obj.memCache.io == MemObject::ioReading);
            map->closeForReading(mem_obj.memCache.index);
            mem_obj.memCache.index = -1;
            mem_obj.memCache.io = MemObject::ioDone;
        }
    }
}
/// calculates maximum number of entries we need to store and map
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t minEntrySize = Ipc::Mem::PageSize();
    const int64_t entryLimit = Config.memMaxSize / minEntrySize;
    return entryLimit;
}
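// Worked example (the page size is platform-dependent; assume 32 KB here):
// with "cache_mem 256 MB", the limit is 256 * 1024 KB / 32 KB = 8192 entries,
// i.e., one page is the minimum allocation per cached entry.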
/// reports our needs for shared memory pages to Ipc::Mem::Pages;
/// decides whether to use a shared memory cache or checks its configuration;
/// and initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): spaceOwner(NULL), mapOwner(NULL), extrasOwner(NULL) {}
    virtual void finalizeConfig();
    virtual void claimMemoryNeeds();
    virtual void useConfig();
    virtual ~MemStoreRr();

protected:
    /* Ipc::Mem::RegisteredRunner API */
    virtual void create();

private:
    Ipc::Mem::Owner<Ipc::Mem::PageStack> *spaceOwner; ///< free slices Owner
    MemStoreMap::Owner *mapOwner; ///< primary map Owner
    Ipc::Mem::Owner<MemStoreMapExtras> *extrasOwner; ///< PageIds Owner
};

RunnerRegistrationEntry(MemStoreRr);
994 Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage
, MemStore::EntryLimit());
void
MemStoreRr::finalizeConfig()
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}
void
MemStoreRr::useConfig()
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::useConfig();
}
void
MemStoreRr::create()
{
    if (!Config.memShared)
        return;

    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }

    Must(!spaceOwner);
    spaceOwner = shm_new(Ipc::Mem::PageStack)(SpaceLabel, SpacePoolId,
                 entryLimit, 0);
    Must(!mapOwner);
    mapOwner = MemStoreMap::Init(MapLabel, entryLimit);
    Must(!extrasOwner);
    extrasOwner = shm_new(MemStoreMapExtras)(ExtrasLabel, entryLimit);
}
MemStoreRr::~MemStoreRr()
{
    delete spaceOwner;
    delete mapOwner;
    delete extrasOwner;
}