/* DEBUG: section 20    Memory Cache */
#include "squid.h"
#include "base/RunnersRegistry.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "StoreStats.h"
/// shared memory segment path to use for MemStore maps
static const char *ShmLabel = "cache_mem";
// XXX: support storage using more than one page per entry
23 MemStore::MemStore(): map(NULL
), theCurrentSize(0)
35 const int64_t entryLimit
= EntryLimit();
37 return; // no memory cache configured or a misconfiguration
39 const int64_t diskMaxSize
= Store::Root().maxObjectSize();
40 const int64_t memMaxSize
= maxObjectSize();
41 if (diskMaxSize
== -1) {
42 debugs(20, DBG_IMPORTANT
, "WARNING: disk-cache maximum object size "
43 "is unlimited but mem-cache maximum object size is " <<
44 memMaxSize
/ 1024.0 << " KB");
45 } else if (diskMaxSize
> memMaxSize
) {
46 debugs(20, DBG_IMPORTANT
, "WARNING: disk-cache maximum object size "
47 "is too large for mem-cache: " <<
48 diskMaxSize
/ 1024.0 << " KB > " <<
49 memMaxSize
/ 1024.0 << " KB");
52 map
= new MemStoreMap(ShmLabel
);
57 MemStore::getStats(StoreInfoStats
&stats
) const
59 const size_t pageSize
= Ipc::Mem::PageSize();
61 stats
.mem
.shared
= true;
63 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
65 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
66 stats
.mem
.count
= currentCount();
70 MemStore::stat(StoreEntry
&e
) const
72 storeAppendPrintf(&e
, "\n\nShared Memory Cache\n");
74 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", Config
.memMaxSize
/1024.0);
77 const int limit
= map
->entryLimit();
78 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
80 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
81 currentCount(), (100.0 * currentCount() / limit
));
83 if (limit
< 100) { // XXX: otherwise too expensive to count
84 Ipc::ReadWriteLockStats stats
;
85 map
->updateStats(stats
);
98 MemStore::minSize() const
100 return 0; // XXX: irrelevant, but Store parent forces us to implement this
104 MemStore::maxSize() const
106 return 0; // XXX: make configurable
110 MemStore::currentSize() const
112 return theCurrentSize
;
116 MemStore::currentCount() const
118 return map
? map
->entryCount() : 0;
122 MemStore::maxObjectSize() const
124 return Ipc::Mem::PageSize();
128 MemStore::reference(StoreEntry
&)
133 MemStore::dereference(StoreEntry
&)
135 // no need to keep e in the global store_table for us; we have our own map
146 MemStore::search(String
const, HttpRequest
*)
148 fatal("not implemented");
153 MemStore::get(const cache_key
*key
)
158 // XXX: replace sfileno with a bigger word (sfileno is only for cache_dirs)
160 const Ipc::StoreMapSlot
*const slot
= map
->openForReading(key
, index
);
164 const Ipc::StoreMapSlot::Basics
&basics
= slot
->basics
;
165 const MemStoreMap::Extras
&extras
= map
->extras(index
);
167 // create a brand new store entry and initialize it with stored info
168 StoreEntry
*e
= new StoreEntry();
171 e
->swap_file_sz
= basics
.swap_file_sz
;
172 e
->lastref
= basics
.lastref
;
173 e
->timestamp
= basics
.timestamp
;
174 e
->expires
= basics
.expires
;
175 e
->lastmod
= basics
.lastmod
;
176 e
->refcount
= basics
.refcount
;
177 e
->flags
= basics
.flags
;
179 e
->store_status
= STORE_OK
;
180 e
->mem_status
= IN_MEMORY
; // setMemStatus(IN_MEMORY) requires mem_obj
181 //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
182 e
->ping_status
= PING_NONE
;
184 EBIT_SET(e
->flags
, ENTRY_CACHABLE
);
185 EBIT_CLR(e
->flags
, RELEASE_REQUEST
);
186 EBIT_CLR(e
->flags
, KEY_PRIVATE
);
187 EBIT_SET(e
->flags
, ENTRY_VALIDATED
);
189 const bool copied
= copyFromShm(*e
, extras
);
191 // we copied everything we could to local memory; no more need to lock
192 map
->closeForReading(index
);
199 debugs(20, 3, HERE
<< "mem-loading failed; freeing " << index
);
200 map
->free(index
); // do not let others into the same trap
205 MemStore::get(String
const key
, STOREGETCLIENT aCallback
, void *aCallbackData
)
207 // XXX: not needed but Store parent forces us to implement this
208 fatal("MemStore::get(key,callback,data) should not be called");
212 MemStore::copyFromShm(StoreEntry
&e
, const MemStoreMap::Extras
&extras
)
214 const Ipc::Mem::PageId
&page
= extras
.page
;
216 StoreIOBuffer
sourceBuf(extras
.storedSize
, 0,
217 static_cast<char*>(PagePointer(page
)));
219 // XXX: We do not know the URLs yet, only the key, but we need to parse and
220 // store the response for the Root().get() callers to be happy because they
221 // expect IN_MEMORY entries to already have the response headers and body.
222 // At least one caller calls createMemObject() if there is not one, so
223 // we hide the true object until that happens (to avoid leaking TBD URLs).
224 e
.createMemObject("TBD", "TBD");
226 // emulate the usual Store code but w/o inapplicable checks and callbacks:
228 // from store_client::readBody():
229 HttpReply
*rep
= (HttpReply
*)e
.getReply();
230 const ssize_t end
= headersEnd(sourceBuf
.data
, sourceBuf
.length
);
231 if (!rep
->parseCharBuf(sourceBuf
.data
, end
)) {
232 debugs(20, DBG_IMPORTANT
, "Could not parse mem-cached headers: " << e
);
235 // local memory stores both headers and body
236 e
.mem_obj
->object_sz
= sourceBuf
.length
; // from StoreEntry::complete()
238 storeGetMemSpace(sourceBuf
.length
); // from StoreEntry::write()
240 assert(e
.mem_obj
->data_hdr
.write(sourceBuf
)); // from MemObject::write()
241 const int64_t written
= e
.mem_obj
->endOffset();
242 // we should write all because StoreEntry::write() never fails
243 assert(written
>= 0 &&
244 static_cast<size_t>(written
) == sourceBuf
.length
);
245 // would be nice to call validLength() here, but it needs e.key
247 debugs(20, 7, HERE
<< "mem-loaded all " << written
<< " bytes of " << e
<<
256 MemStore::keepInLocalMemory(const StoreEntry
&e
) const
258 if (!e
.memoryCachable()) {
259 debugs(20, 7, HERE
<< "Not memory cachable: " << e
);
260 return false; // will not cache due to entry state or properties
264 const int64_t loadedSize
= e
.mem_obj
->endOffset();
265 const int64_t expectedSize
= e
.mem_obj
->expectedReplySize(); // may be < 0
266 const int64_t ramSize
= max(loadedSize
, expectedSize
);
268 if (ramSize
> static_cast<int64_t>(Config
.Store
.maxInMemObjSize
)) {
269 debugs(20, 5, HERE
<< "Too big max(" <<
270 loadedSize
<< ", " << expectedSize
<< "): " << e
);
271 return false; // will not cache due to cachable entry size limits
274 if (!willFit(ramSize
)) {
275 debugs(20, 5, HERE
<< "Wont fit max(" <<
276 loadedSize
<< ", " << expectedSize
<< "): " << e
);
277 return false; // will not cache due to memory cache slot limit
284 MemStore::considerKeeping(StoreEntry
&e
)
286 if (!keepInLocalMemory(e
))
289 // since we copy everything at once, we can only keep complete entries
290 if (e
.store_status
!= STORE_OK
) {
291 debugs(20, 7, HERE
<< "Incomplete: " << e
);
297 const int64_t loadedSize
= e
.mem_obj
->endOffset();
298 const int64_t expectedSize
= e
.mem_obj
->expectedReplySize();
300 // objects of unknown size are not allowed into memory cache, for now
301 if (expectedSize
< 0) {
302 debugs(20, 5, HERE
<< "Unknown expected size: " << e
);
306 // since we copy everything at once, we can only keep fully loaded entries
307 if (loadedSize
!= expectedSize
) {
308 debugs(20, 7, HERE
<< "partially loaded: " << loadedSize
<< " != " <<
313 keep(e
); // may still fail
317 MemStore::willFit(int64_t need
) const
319 return need
<= static_cast<int64_t>(Ipc::Mem::PageSize());
322 /// allocates map slot and calls copyToShm to store the entry in shared memory
324 MemStore::keep(StoreEntry
&e
)
327 debugs(20, 5, HERE
<< "No map to mem-cache " << e
);
332 Ipc::StoreMapSlot
*slot
= map
->openForWriting(reinterpret_cast<const cache_key
*>(e
.key
), index
);
334 debugs(20, 5, HERE
<< "No room in mem-cache map to index " << e
);
338 MemStoreMap::Extras
&extras
= map
->extras(index
);
339 if (copyToShm(e
, extras
)) {
341 map
->closeForWriting(index
, false);
347 /// uses mem_hdr::copy() to copy local data to shared memory
349 MemStore::copyToShm(StoreEntry
&e
, MemStoreMap::Extras
&extras
)
351 Ipc::Mem::PageId page
;
352 if (!Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage
, page
)) {
353 debugs(20, 5, HERE
<< "No mem-cache page for " << e
);
354 return false; // GetPage is responsible for any cleanup on failures
357 const int64_t bufSize
= Ipc::Mem::PageSize();
358 const int64_t eSize
= e
.mem_obj
->endOffset();
360 StoreIOBuffer
sharedSpace(bufSize
, 0,
361 static_cast<char*>(PagePointer(page
)));
363 // check that we kept everything or purge incomplete/sparse cached entry
364 const ssize_t copied
= e
.mem_obj
->data_hdr
.copy(sharedSpace
);
365 if (eSize
!= copied
) {
366 debugs(20, 2, HERE
<< "Failed to mem-cache " << e
<< ": " <<
367 eSize
<< "!=" << copied
);
373 debugs(20, 7, HERE
<< "mem-cached all " << eSize
<< " bytes of " << e
<<
376 theCurrentSize
+= Ipc::Mem::PageSize();
377 // remember storage location and size
379 extras
.storedSize
= copied
;
384 MemStore::cleanReadable(const sfileno fileno
)
386 Ipc::Mem::PutPage(map
->extras(fileno
).page
);
387 theCurrentSize
-= Ipc::Mem::PageSize();
390 /// calculates maximum number of entries we need to store and map
392 MemStore::EntryLimit()
394 if (!Config
.memShared
|| !Config
.memMaxSize
)
395 return 0; // no memory cache configured
397 const int64_t entrySize
= Ipc::Mem::PageSize(); // for now
398 const int64_t entryLimit
= Config
.memMaxSize
/ entrySize
;
402 /// reports our needs for shared memory pages to Ipc::Mem::Pages
403 class MemStoreClaimMemoryNeedsRr
: public RegisteredRunner
406 /* RegisteredRunner API */
407 virtual void run(const RunnerRegistry
&r
);
410 RunnerRegistrationEntry(rrClaimMemoryNeeds
, MemStoreClaimMemoryNeedsRr
);
413 MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry
&)
415 Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage
, MemStore::EntryLimit());
418 /// decides whether to use a shared memory cache or checks its configuration
419 class MemStoreCfgRr
: public ::RegisteredRunner
422 /* RegisteredRunner API */
423 virtual void run(const RunnerRegistry
&);
426 RunnerRegistrationEntry(rrFinalizeConfig
, MemStoreCfgRr
);
428 void MemStoreCfgRr::run(const RunnerRegistry
&r
)
430 // decide whether to use a shared memory cache if the user did not specify
431 if (!Config
.memShared
.configured()) {
432 Config
.memShared
.configure(Ipc::Atomic::Enabled() &&
433 Ipc::Mem::Segment::Enabled() && UsingSmp() &&
434 Config
.memMaxSize
> 0);
435 } else if (Config
.memShared
&& !Ipc::Atomic::Enabled()) {
436 // bail if the user wants shared memory cache but we cannot support it
437 fatal("memory_cache_shared is on, but no support for atomic operations detected");
438 } else if (Config
.memShared
&& !Ipc::Mem::Segment::Enabled()) {
439 fatal("memory_cache_shared is on, but no support for shared memory detected");
440 } else if (Config
.memShared
&& !UsingSmp()) {
441 debugs(20, DBG_IMPORTANT
, "WARNING: memory_cache_shared is on, but only"
442 " a single worker is running");
446 /// initializes shared memory segments used by MemStore
447 class MemStoreRr
: public Ipc::Mem::RegisteredRunner
450 /* RegisteredRunner API */
451 MemStoreRr(): owner(NULL
) {}
452 virtual void run(const RunnerRegistry
&);
453 virtual ~MemStoreRr();
456 virtual void create(const RunnerRegistry
&);
459 MemStoreMap::Owner
*owner
;
462 RunnerRegistrationEntry(rrAfterConfig
, MemStoreRr
);
464 void MemStoreRr::run(const RunnerRegistry
&r
)
466 assert(Config
.memShared
.configured());
467 Ipc::Mem::RegisteredRunner::run(r
);
470 void MemStoreRr::create(const RunnerRegistry
&)
472 if (!Config
.memShared
)
476 const int64_t entryLimit
= MemStore::EntryLimit();
477 if (entryLimit
<= 0) {
478 if (Config
.memMaxSize
> 0) {
479 debugs(20, DBG_IMPORTANT
, "WARNING: mem-cache size is too small ("
480 << (Config
.memMaxSize
/ 1024.0) << " KB), should be >= " <<
481 (Ipc::Mem::PageSize() / 1024.0) << " KB");
483 return; // no memory cache configured or a misconfiguration
485 owner
= MemStoreMap::Init(ShmLabel
, entryLimit
);
488 MemStoreRr::~MemStoreRr()