/* DEBUG: section 20    Memory Cache */
7 #include "base/RunnersRegistry.h"
9 #include "ipc/mem/Page.h"
10 #include "ipc/mem/Pages.h"
11 #include "MemObject.h"
13 #include "mime_header.h"
14 #include "SquidConfig.h"
15 #include "StoreStats.h"
// This label is handed to the MemStoreMap constructor and to
// MemStoreMap::Init() below, so all workers attach to the same segment.
18 /// shared memory segment path to use for MemStore maps
19 static const char *ShmLabel
= "cache_mem";
21 // XXX: support storage using more than one page per entry
// Constructor: start with no shared map attached and zero bytes accounted.
23 MemStore::MemStore(): map(NULL
), theCurrentSize(0)
// --- init() fragment ---
// NOTE(review): this extract is lossy; original lines 24-34 and 36 (braces,
// signatures, the entryLimit guard condition) are missing. Code kept as-is.
// Computes the entry limit and bails out when no memory cache is configured.
35 const int64_t entryLimit
= EntryLimit();
37 return; // no memory cache configured or a misconfiguration
// Warn when the disk cache admits objects larger than the memory cache can
// hold (maxObjectSize() is a single shared page, per the XXX above): such
// objects can never be memory-cached.
39 const int64_t diskMaxSize
= Store::Root().maxObjectSize();
40 const int64_t memMaxSize
= maxObjectSize();
41 if (diskMaxSize
== -1) {
42 debugs(20, DBG_IMPORTANT
, "WARNING: disk-cache maximum object size "
43 "is unlimited but mem-cache maximum object size is " <<
44 memMaxSize
/ 1024.0 << " KB");
45 } else if (diskMaxSize
> memMaxSize
) {
46 debugs(20, DBG_IMPORTANT
, "WARNING: disk-cache maximum object size "
47 "is too large for mem-cache: " <<
48 diskMaxSize
/ 1024.0 << " KB > " <<
49 memMaxSize
/ 1024.0 << " KB");
// Attach to the shared-memory index using the common segment label.
52 map
= new MemStoreMap(ShmLabel
);
// Fills the shared-memory portion of StoreInfoStats: capacity and usage are
// derived from the cachePage pool (page limit/level times page size) and the
// entry count from currentCount().
// NOTE(review): lossy extract -- the return type, braces, and the
// stats.mem.capacity/size assignment targets (original lines 62, 64) are
// missing here; code kept byte-identical.
57 MemStore::getStats(StoreInfoStats
&stats
) const
59 const size_t pageSize
= Ipc::Mem::PageSize();
61 stats
.mem
.shared
= true;
// page limit * page size: total bytes the shared cache may use
63 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
// page level * page size: bytes currently in use
65 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
66 stats
.mem
.count
= currentCount();
// Appends a human-readable "Shared Memory Cache" report (size, entry limits,
// current entry count/percentage, and -- for small maps -- lock stats) to the
// given StoreEntry, for cache manager output.
// NOTE(review): lossy extract; braces and some lines are missing.
70 MemStore::stat(StoreEntry
&e
) const
72 storeAppendPrintf(&e
, "\n\nShared Memory Cache\n");
74 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", Config
.memMaxSize
/1024.0);
77 const int limit
= map
->entryLimit();
78 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
80 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
81 currentCount(), (100.0 * currentCount() / limit
));
// Walking every slot to gather read/write lock stats is O(limit), so it is
// only done for tiny maps (see the XXX below).
83 if (limit
< 100) { // XXX: otherwise too expensive to count
84 Ipc::ReadWriteLockStats stats
;
85 map
->updateStats(stats
);
// Minimum cache size: always zero for the shared memory cache.
98 MemStore::minSize() const
100 return 0; // XXX: irrelevant, but Store parent forces us to implement this
// Maximum cache size: unimplemented (returns zero), per the XXX below.
104 MemStore::maxSize() const
106 return 0; // XXX: make configurable
// Bytes currently stored; maintained in page-sized increments by
// copyToShm() (+= PageSize) and cleanReadable() (-= PageSize).
110 MemStore::currentSize() const
112 return theCurrentSize
;
// Number of cached entries; zero when no shared map has been created.
116 MemStore::currentCount() const
118 return map
? map
->entryCount() : 0;
// Largest cachable object: one shared memory page (see the XXX at the top
// about supporting more than one page per entry).
122 MemStore::maxObjectSize() const
124 return Ipc::Mem::PageSize();
// Store API reference-tracking hooks. No bodies are visible in this extract;
// the comment below suggests they are intentionally no-ops for MemStore.
128 MemStore::reference(StoreEntry
&)
133 MemStore::dereference(StoreEntry
&, bool)
135 // no need to keep e in the global store_table for us; we have our own map
// Store API search hook: not supported by the shared memory cache; aborts.
146 MemStore::search(String
const, HttpRequest
*)
148 fatal("not implemented");
// Looks up a cached response by key. On a hit: opens the map slot for
// reading, builds a brand-new local StoreEntry from the slot basics and the
// shared page (via copyFromShm), then releases the read lock. On copy
// failure the slot is freed so other workers do not hit the same problem.
// NOTE(review): lossy extract -- the return type, the declaration of
// `index`, the miss path, braces, and the success return (original lines
// 154-157, 159, 161-163, 169-170, 193-198, 201-202) are missing.
153 MemStore::get(const cache_key
*key
)
158 // XXX: replace sfileno with a bigger word (sfileno is only for cache_dirs)
160 const Ipc::StoreMapSlot
*const slot
= map
->openForReading(key
, index
);
164 const Ipc::StoreMapSlot::Basics
&basics
= slot
->basics
;
165 const MemStoreMap::Extras
&extras
= map
->extras(index
);
167 // create a brand new store entry and initialize it with stored info
168 StoreEntry
*e
= new StoreEntry();
// Restore per-entry metadata saved in the shared slot.
171 e
->swap_file_sz
= basics
.swap_file_sz
;
172 e
->lastref
= basics
.lastref
;
173 e
->timestamp
= basics
.timestamp
;
174 e
->expires
= basics
.expires
;
175 e
->lastmod
= basics
.lastmod
;
176 e
->refcount
= basics
.refcount
;
177 e
->flags
= basics
.flags
;
// Mark the local entry as a complete, in-memory object.
179 e
->store_status
= STORE_OK
;
180 e
->mem_status
= IN_MEMORY
; // setMemStatus(IN_MEMORY) requires mem_obj
181 //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
182 e
->ping_status
= PING_NONE
;
184 EBIT_SET(e
->flags
, ENTRY_CACHABLE
);
185 EBIT_CLR(e
->flags
, RELEASE_REQUEST
);
186 EBIT_CLR(e
->flags
, KEY_PRIVATE
);
187 EBIT_SET(e
->flags
, ENTRY_VALIDATED
);
// Copy headers+body from the shared page into local memory while we still
// hold the read lock.
189 const bool copied
= copyFromShm(*e
, extras
);
191 // we copied everything we could to local memory; no more need to lock
192 map
->closeForReading(index
);
// Failure path: log and release the slot.
199 debugs(20, 3, HERE
<< "mem-loading failed; freeing " << index
);
200 map
->free(index
); // do not let others into the same trap
// Asynchronous Store::get() overload required by the Store parent class but
// unsupported here: calling it is a fatal programming error.
205 MemStore::get(String
const key
, STOREGETCLIENT aCallback
, void *aCallbackData
)
207 // XXX: not needed but Store parent forces us to implement this
208 fatal("MemStore::get(key,callback,data) should not be called");
// Copies a cached response out of its shared memory page into the local
// StoreEntry: parses the stored HTTP reply headers, then writes the whole
// page contents (headers + body) into e.mem_obj, mimicking the regular
// Store write path without its checks/callbacks.
// NOTE(review): lossy extract -- return type, braces, and the error-return
// after the parse failure (original lines 233-234) are missing.
212 MemStore::copyFromShm(StoreEntry
&e
, const MemStoreMap::Extras
&extras
)
// Shared page location and the exact byte count recorded by copyToShm().
214 const Ipc::Mem::PageId
&page
= extras
.page
;
216 StoreIOBuffer
sourceBuf(extras
.storedSize
, 0,
217 static_cast<char*>(PagePointer(page
)));
219 // XXX: We do not know the URLs yet, only the key, but we need to parse and
220 // store the response for the Root().get() callers to be happy because they
221 // expect IN_MEMORY entries to already have the response headers and body.
222 // At least one caller calls createMemObject() if there is not one, so
223 // we hide the true object until that happens (to avoid leaking TBD URLs).
224 e
.createMemObject("TBD", "TBD");
226 // emulate the usual Store code but w/o inapplicable checks and callbacks:
228 // from store_client::readBody():
229 HttpReply
*rep
= (HttpReply
*)e
.getReply();
230 const ssize_t end
= headersEnd(sourceBuf
.data
, sourceBuf
.length
);
231 if (!rep
->parseCharBuf(sourceBuf
.data
, end
)) {
232 debugs(20, DBG_IMPORTANT
, "Could not parse mem-cached headers: " << e
);
235 // local memory stores both headers and body
236 e
.mem_obj
->object_sz
= sourceBuf
.length
; // from StoreEntry::complete()
238 storeGetMemSpace(sourceBuf
.length
); // from StoreEntry::write()
240 assert(e
.mem_obj
->data_hdr
.write(sourceBuf
)); // from MemObject::write()
241 const int64_t written
= e
.mem_obj
->endOffset();
242 // we should write all because StoreEntry::write() never fails
243 assert(written
>= 0 &&
244 static_cast<size_t>(written
) == sourceBuf
.length
);
245 // would be nice to call validLength() here, but it needs e.key
247 debugs(20, 7, HERE
<< "mem-loaded all " << written
<< " bytes of " << e
<<
// Decides whether an entry is eligible for the memory cache: it must be
// memoryCachable(), no larger than maxInMemObjSize, and small enough to
// willFit() in a single shared page. Size is judged by the larger of the
// bytes loaded so far and the expected reply size (which may be negative
// when unknown).
// NOTE(review): lossy extract -- return type, braces, and the final
// `return true` (original lines 279-280) are missing.
256 MemStore::keepInLocalMemory(const StoreEntry
&e
) const
258 if (!e
.memoryCachable()) {
259 debugs(20, 7, HERE
<< "Not memory cachable: " << e
);
260 return false; // will not cache due to entry state or properties
264 const int64_t loadedSize
= e
.mem_obj
->endOffset();
265 const int64_t expectedSize
= e
.mem_obj
->expectedReplySize(); // may be < 0
266 const int64_t ramSize
= max(loadedSize
, expectedSize
);
268 if (ramSize
> static_cast<int64_t>(Config
.Store
.maxInMemObjSize
)) {
269 debugs(20, 5, HERE
<< "Too big max(" <<
270 loadedSize
<< ", " << expectedSize
<< "): " << e
);
271 return false; // will not cache due to cachable entry size limits
274 if (!willFit(ramSize
)) {
275 debugs(20, 5, HERE
<< "Wont fit max(" <<
276 loadedSize
<< ", " << expectedSize
<< "): " << e
);
277 return false; // will not cache due to memory cache slot limit
// Gatekeeper called when an entry might be worth memory-caching: rejects
// ineligible (keepInLocalMemory), incomplete (store_status != STORE_OK),
// unknown-size, partially-loaded, and Vary-carrying entries, then delegates
// to keep() -- which may itself still fail to cache.
// NOTE(review): lossy extract -- braces and several early-return lines are
// missing between the visible checks.
284 MemStore::considerKeeping(StoreEntry
&e
)
286 if (!keepInLocalMemory(e
))
289 // since we copy everything at once, we can only keep complete entries
290 if (e
.store_status
!= STORE_OK
) {
291 debugs(20, 7, HERE
<< "Incomplete: " << e
);
297 const int64_t loadedSize
= e
.mem_obj
->endOffset();
298 const int64_t expectedSize
= e
.mem_obj
->expectedReplySize();
300 // objects of unknown size are not allowed into memory cache, for now
301 if (expectedSize
< 0) {
302 debugs(20, 5, HERE
<< "Unknown expected size: " << e
);
306 // since we copy everything at once, we can only keep fully loaded entries
307 if (loadedSize
!= expectedSize
) {
308 debugs(20, 7, HERE
<< "partially loaded: " << loadedSize
<< " != " <<
313 if (e
.mem_obj
->vary_headers
) {
314 // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
315 debugs(20, 5, "Vary not yet supported: " << e
.mem_obj
->vary_headers
);
319 keep(e
); // may still fail
// True when `need` bytes fit in one shared memory page -- the per-entry
// storage unit of this cache (see the XXX about multi-page entries).
323 MemStore::willFit(int64_t need
) const
325 return need
<= static_cast<int64_t>(Ipc::Mem::PageSize());
// NOTE(review): lossy extract -- return type, braces, the early return when
// `map` is null, the slot-null check, the `index` declaration, and the
// abortWriting() failure branch are missing; code kept byte-identical.
328 /// allocates map slot and calls copyToShm to store the entry in shared memory
330 MemStore::keep(StoreEntry
&e
)
// Bail when init() never created a map (no shared cache configured).
333 debugs(20, 5, HERE
<< "No map to mem-cache " << e
);
// Reserve a map slot for this entry's key; fails when the map is full.
338 Ipc::StoreMapSlot
*slot
= map
->openForWriting(reinterpret_cast<const cache_key
*>(e
.key
), index
);
340 debugs(20, 5, HERE
<< "No room in mem-cache map to index " << e
);
344 MemStoreMap::Extras
&extras
= map
->extras(index
);
345 if (copyToShm(e
, extras
)) {
// Publish the slot; `false` presumably means "do not mark for freeing" --
// TODO confirm against Ipc::StoreMap::closeForWriting().
347 map
->closeForWriting(index
, false);
// NOTE(review): lossy extract -- return type, braces, the failure `return
// false` after the size-mismatch check, the slot-basics fill, extras.page
// assignment, and the final `return true` are missing; kept byte-identical.
353 /// uses mem_hdr::copy() to copy local data to shared memory
355 MemStore::copyToShm(StoreEntry
&e
, MemStoreMap::Extras
&extras
)
// Acquire one shared page to hold the entire entry (headers + body).
357 Ipc::Mem::PageId page
;
358 if (!Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage
, page
)) {
359 debugs(20, 5, HERE
<< "No mem-cache page for " << e
);
360 return false; // GetPage is responsible for any cleanup on failures
363 const int64_t bufSize
= Ipc::Mem::PageSize();
364 const int64_t eSize
= e
.mem_obj
->endOffset();
366 StoreIOBuffer
sharedSpace(bufSize
, 0,
367 static_cast<char*>(PagePointer(page
)));
369 // check that we kept everything or purge incomplete/sparse cached entry
370 const ssize_t copied
= e
.mem_obj
->data_hdr
.copy(sharedSpace
);
371 if (eSize
!= copied
) {
372 debugs(20, 2, HERE
<< "Failed to mem-cache " << e
<< ": " <<
373 eSize
<< "!=" << copied
);
379 debugs(20, 7, HERE
<< "mem-cached all " << eSize
<< " bytes of " << e
<<
// Account a full page regardless of how many bytes were actually copied;
// cleanReadable() symmetrically subtracts a full page.
382 theCurrentSize
+= Ipc::Mem::PageSize();
383 // remember storage location and size
385 extras
.storedSize
= copied
;
// Releases the shared page behind a map slot and subtracts its page-sized
// contribution from theCurrentSize (the inverse of copyToShm accounting).
// NOTE(review): return type and braces missing from this extract.
390 MemStore::cleanReadable(const sfileno fileno
)
392 Ipc::Mem::PutPage(map
->extras(fileno
).page
);
393 theCurrentSize
-= Ipc::Mem::PageSize();
// NOTE(review): lossy extract -- return type, braces, and the final return
// of entryLimit (original lines 405-406, likely clamped) are missing.
396 /// calculates maximum number of entries we need to store and map
398 MemStore::EntryLimit()
// Zero entries when shared memory caching is off or cache_mem is zero.
400 if (!Config
.memShared
|| !Config
.memMaxSize
)
401 return 0; // no memory cache configured
// One page per entry for now, so the limit is cache_mem / page size.
403 const int64_t entrySize
= Ipc::Mem::PageSize(); // for now
404 const int64_t entryLimit
= Config
.memMaxSize
/ entrySize
;
// NOTE(review): class braces and access specifiers are missing from this
// extract; kept byte-identical.
408 /// reports our needs for shared memory pages to Ipc::Mem::Pages
409 class MemStoreClaimMemoryNeedsRr
: public RegisteredRunner
412 /* RegisteredRunner API */
413 virtual void run(const RunnerRegistry
&r
);
// Register this runner for the claim-memory-needs startup phase.
416 RunnerRegistrationEntry(rrClaimMemoryNeeds
, MemStoreClaimMemoryNeedsRr
);
// Announce how many cachePage pages MemStore will need, based on the
// configured entry limit.
419 MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry
&)
421 Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage
, MemStore::EntryLimit());
// NOTE(review): class braces/access specifiers missing from this extract.
424 /// decides whether to use a shared memory cache or checks its configuration
425 class MemStoreCfgRr
: public ::RegisteredRunner
428 /* RegisteredRunner API */
429 virtual void run(const RunnerRegistry
&);
// Register this runner for the finalize-config startup phase.
432 RunnerRegistrationEntry(rrFinalizeConfig
, MemStoreCfgRr
);
// Defaults memory_cache_shared when unset (on only when atomics, shared
// memory segments, SMP mode, and a non-zero cache_mem are all available);
// otherwise validates an explicit "on" setting, dying on unsupported
// platforms and warning when only one worker runs.
434 void MemStoreCfgRr::run(const RunnerRegistry
&r
)
436 // decide whether to use a shared memory cache if the user did not specify
437 if (!Config
.memShared
.configured()) {
438 Config
.memShared
.configure(Ipc::Atomic::Enabled() &&
439 Ipc::Mem::Segment::Enabled() && UsingSmp() &&
440 Config
.memMaxSize
> 0);
441 } else if (Config
.memShared
&& !Ipc::Atomic::Enabled()) {
442 // bail if the user wants shared memory cache but we cannot support it
443 fatal("memory_cache_shared is on, but no support for atomic operations detected");
444 } else if (Config
.memShared
&& !Ipc::Mem::Segment::Enabled()) {
445 fatal("memory_cache_shared is on, but no support for shared memory detected");
446 } else if (Config
.memShared
&& !UsingSmp()) {
// Shared cache with one worker is legal but pointless; warn, do not die.
447 debugs(20, DBG_IMPORTANT
, "WARNING: memory_cache_shared is on, but only"
448 " a single worker is running");
// NOTE(review): class braces/access specifiers and the create() early
// return/success-check lines are missing from this extract.
452 /// initializes shared memory segments used by MemStore
453 class MemStoreRr
: public Ipc::Mem::RegisteredRunner
456 /* RegisteredRunner API */
457 MemStoreRr(): owner(NULL
) {}
458 virtual void run(const RunnerRegistry
&);
459 virtual ~MemStoreRr();
462 virtual void create(const RunnerRegistry
&);
// Owner of the shared MemStoreMap segment; created in create(),
// presumably deleted in the destructor (dtor body not visible here).
465 MemStoreMap::Owner
*owner
;
// Register this runner for the after-config startup phase.
468 RunnerRegistrationEntry(rrAfterConfig
, MemStoreRr
);
// By this phase MemStoreCfgRr must have settled memory_cache_shared;
// then delegate segment handling to the Ipc::Mem base runner.
470 void MemStoreRr::run(const RunnerRegistry
&r
)
472 assert(Config
.memShared
.configured());
473 Ipc::Mem::RegisteredRunner::run(r
);
// Creates the shared map segment when shared caching is enabled and the
// configured cache_mem is at least one page; otherwise warns (when a
// too-small size was configured) and skips creation.
476 void MemStoreRr::create(const RunnerRegistry
&)
478 if (!Config
.memShared
)
482 const int64_t entryLimit
= MemStore::EntryLimit();
483 if (entryLimit
<= 0) {
484 if (Config
.memMaxSize
> 0) {
485 debugs(20, DBG_IMPORTANT
, "WARNING: mem-cache size is too small ("
486 << (Config
.memMaxSize
/ 1024.0) << " KB), should be >= " <<
487 (Ipc::Mem::PageSize() / 1024.0) << " KB");
489 return; // no memory cache configured or a misconfiguration
491 owner
= MemStoreMap::Init(ShmLabel
, entryLimit
);
494 MemStoreRr::~MemStoreRr()