/* DEBUG: section 20    Memory Cache */
7 #include "base/RunnersRegistry.h"
9 #include "ipc/mem/Page.h"
10 #include "ipc/mem/Pages.h"
11 #include "MemObject.h"
13 #include "mime_header.h"
14 #include "StoreStats.h"
// Well-known name under which the MemStore map's shared memory segment is
// created/attached (see MemStoreMap construction and Init below).
17 /// shared memory segment path to use for MemStore maps
18 static const char *ShmLabel
= "cache_mem";
20 // XXX: support storage using more than one page per entry
// Constructor: starts with no shared map attached and zero cached bytes.
// NOTE(review): the constructor body lines are not visible in this extract.
22 MemStore::MemStore(): map(NULL
), theCurrentSize(0)
// Fragment of MemStore initialization (the enclosing signature and several
// interior lines are missing from this extract). Computes the entry limit,
// cross-checks disk-cache vs mem-cache maximum object sizes, and creates the
// shared entry map.
34 const int64_t entryLimit
= EntryLimit();
// presumably guarded by an entryLimit check just above -- lines missing here
36 return; // no memory cache configured or a misconfiguration
38 const int64_t diskMaxSize
= Store::Root().maxObjectSize();
39 const int64_t memMaxSize
= maxObjectSize();
// Warn when objects acceptable to the disk cache cannot fit the mem cache.
40 if (diskMaxSize
== -1) {
41 debugs(20, DBG_IMPORTANT
, "WARNING: disk-cache maximum object size "
42 "is unlimited but mem-cache maximum object size is " <<
43 memMaxSize
/ 1024.0 << " KB");
44 } else if (diskMaxSize
> memMaxSize
) {
45 debugs(20, DBG_IMPORTANT
, "WARNING: disk-cache maximum object size "
46 "is too large for mem-cache: " <<
47 diskMaxSize
/ 1024.0 << " KB > " <<
48 memMaxSize
/ 1024.0 << " KB");
// Attach/create the shared entry map under the well-known segment label.
51 map
= new MemStoreMap(ShmLabel
);
// Fills the shared-memory portion of the store statistics snapshot.
// NOTE(review): the assignment targets for the PageLimit/PageLevel products
// (capacity and current size, presumably) are on lines missing from this view.
56 MemStore::getStats(StoreInfoStats
&stats
) const
58 const size_t pageSize
= Ipc::Mem::PageSize();
// This cache lives in shared memory, so flag the mem stats as shared.
60 stats
.mem
.shared
= true;
62 Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage
) * pageSize
;
64 Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage
) * pageSize
;
65 stats
.mem
.count
= currentCount();
// Appends a human-readable "Shared Memory Cache" report to StoreEntry e
// (used by cache manager reporting). Interior lines are missing in this view.
69 MemStore::stat(StoreEntry
&e
) const
71 storeAppendPrintf(&e
, "\n\nShared Memory Cache\n");
73 storeAppendPrintf(&e
, "Maximum Size: %.0f KB\n", Config
.memMaxSize
/1024.0);
// Report map occupancy: configured entry limit and current utilization.
76 const int limit
= map
->entryLimit();
77 storeAppendPrintf(&e
, "Maximum entries: %9d\n", limit
);
79 storeAppendPrintf(&e
, "Current entries: %" PRId64
" %.2f%%\n",
80 currentCount(), (100.0 * currentCount() / limit
));
// Lock statistics are only collected for tiny maps; see XXX below.
82 if (limit
< 100) { // XXX: otherwise too expensive to count
83 Ipc::ReadWriteLockStats stats
;
84 map
->updateStats(stats
);
// Minimum store size; always zero for the shared memory cache.
97 MemStore::minSize() const
99 return 0; // XXX: irrelevant, but Store parent forces us to implement this
// Maximum store size; currently hard-coded to zero (see XXX).
103 MemStore::maxSize() const
105 return 0; // XXX: make configurable
// Bytes currently accounted to this cache; maintained by copyToShm (+= page)
// and cleanReadable (-= page) below.
109 MemStore::currentSize() const
111 return theCurrentSize
;
// Number of entries currently indexed in the shared map; zero when the cache
// was never configured (map is nil).
115 MemStore::currentCount() const
117 return map
? map
->entryCount() : 0;
// Largest cachable object: one shared memory page, because each entry
// currently uses exactly one page (see XXX near the top of the file).
121 MemStore::maxObjectSize() const
123 return Ipc::Mem::PageSize();
// Store API hook; body not visible here -- appears to be a no-op for MemStore.
127 MemStore::reference(StoreEntry
&)
// Store API hook; nothing to do because MemStore keeps its own shared map.
132 MemStore::dereference(StoreEntry
&)
134 // no need to keep e in the global store_table for us; we have our own map
// Store API search; unsupported for the shared memory cache -- aborts.
145 MemStore::search(String
const, HttpRequest
*)
147 fatal("not implemented");
// Looks up a cached entry by key: locks the map slot for reading, rebuilds a
// fresh local StoreEntry from the shared slot basics, copies the stored bytes
// out of shared memory, then releases the read lock. Several control-flow
// lines (index declaration, null checks, return statements) are missing from
// this extract.
152 MemStore::get(const cache_key
*key
)
157 // XXX: replace sfileno with a bigger word (sfileno is only for cache_dirs)
159 const Ipc::StoreMapSlot
*const slot
= map
->openForReading(key
, index
);
163 const Ipc::StoreMapSlot::Basics
&basics
= slot
->basics
;
164 const MemStoreMap::Extras
&extras
= map
->extras(index
);
166 // create a brand new store entry and initialize it with stored info
167 StoreEntry
*e
= new StoreEntry();
// Restore the metadata that was serialized into the slot basics.
170 e
->swap_file_sz
= basics
.swap_file_sz
;
171 e
->lastref
= basics
.lastref
;
172 e
->timestamp
= basics
.timestamp
;
173 e
->expires
= basics
.expires
;
174 e
->lastmod
= basics
.lastmod
;
175 e
->refcount
= basics
.refcount
;
176 e
->flags
= basics
.flags
;
// Mark the reconstructed entry as a complete, in-memory object.
178 e
->store_status
= STORE_OK
;
179 e
->mem_status
= IN_MEMORY
; // setMemStatus(IN_MEMORY) requires mem_obj
180 //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
181 e
->ping_status
= PING_NONE
;
183 EBIT_SET(e
->flags
, ENTRY_CACHABLE
);
184 EBIT_CLR(e
->flags
, RELEASE_REQUEST
);
185 EBIT_CLR(e
->flags
, KEY_PRIVATE
);
186 EBIT_SET(e
->flags
, ENTRY_VALIDATED
);
// Copy headers+body from the shared page into local memory.
188 const bool copied
= copyFromShm(*e
, extras
);
190 // we copied everything we could to local memory; no more need to lock
191 map
->closeForReading(index
);
// Failure path (presumably reached when !copied -- guard lines missing):
// purge the slot so other workers do not hit the same broken entry.
198 debugs(20, 3, HERE
<< "mem-loading failed; freeing " << index
);
199 map
->free(index
); // do not let others into the same trap
// Callback-style Store::get() overload; never expected to be called on
// MemStore, so it aborts immediately.
204 MemStore::get(String
const key
, STOREGETCLIENT aCallback
, void *aCallbackData
)
206 // XXX: not needed but Store parent forces us to implement this
207 fatal("MemStore::get(key,callback,data) should not be called");
// Copies a cached response (headers and body) from the entry's shared memory
// page into the local StoreEntry e. Several lines (return statements, the
// parse-failure cleanup) are missing from this extract.
211 MemStore::copyFromShm(StoreEntry
&e
, const MemStoreMap::Extras
&extras
)
// The stored bytes live in the single shared page recorded in extras.
213 const Ipc::Mem::PageId
&page
= extras
.page
;
215 StoreIOBuffer
sourceBuf(extras
.storedSize
, 0,
216 static_cast<char*>(PagePointer(page
)));
218 // XXX: We do not know the URLs yet, only the key, but we need to parse and
219 // store the response for the Root().get() callers to be happy because they
220 // expect IN_MEMORY entries to already have the response headers and body.
221 // At least one caller calls createMemObject() if there is not one, so
222 // we hide the true object until that happens (to avoid leaking TBD URLs).
223 e
.createMemObject("TBD", "TBD");
225 // emulate the usual Store code but w/o inapplicable checks and callbacks:
227 // from store_client::readBody():
228 HttpReply
*rep
= (HttpReply
*)e
.getReply();
229 const ssize_t end
= headersEnd(sourceBuf
.data
, sourceBuf
.length
);
// Parse the stored reply headers; on failure log and (presumably) bail out.
230 if (!rep
->parseCharBuf(sourceBuf
.data
, end
)) {
231 debugs(20, DBG_IMPORTANT
, "Could not parse mem-cached headers: " << e
);
234 // local memory stores both headers and body
235 e
.mem_obj
->object_sz
= sourceBuf
.length
; // from StoreEntry::complete()
237 storeGetMemSpace(sourceBuf
.length
); // from StoreEntry::write()
239 assert(e
.mem_obj
->data_hdr
.write(sourceBuf
)); // from MemObject::write()
240 const int64_t written
= e
.mem_obj
->endOffset();
241 // we should write all because StoreEntry::write() never fails
242 assert(written
>= 0 &&
243 static_cast<size_t>(written
) == sourceBuf
.length
);
244 // would be nice to call validLength() here, but it needs e.key
246 debugs(20, 7, HERE
<< "mem-loaded all " << written
<< " bytes of " << e
<<
// Decides whether StoreEntry e is eligible for this memory cache: it must be
// memory-cachable, within the configured in-memory object size limit, and
// small enough to fit our one-page-per-entry slots. The final `return true`
// (presumably) is on a line missing from this extract.
255 MemStore::keepInLocalMemory(const StoreEntry
&e
) const
257 if (!e
.memoryCachable()) {
258 debugs(20, 7, HERE
<< "Not memory cachable: " << e
);
259 return false; // will not cache due to entry state or properties
// Size the entry by the larger of what is loaded and what is expected.
263 const int64_t loadedSize
= e
.mem_obj
->endOffset();
264 const int64_t expectedSize
= e
.mem_obj
->expectedReplySize(); // may be < 0
265 const int64_t ramSize
= max(loadedSize
, expectedSize
);
267 if (ramSize
> static_cast<int64_t>(Config
.Store
.maxInMemObjSize
)) {
268 debugs(20, 5, HERE
<< "Too big max(" <<
269 loadedSize
<< ", " << expectedSize
<< "): " << e
);
270 return false; // will not cache due to cachable entry size limits
273 if (!willFit(ramSize
)) {
274 debugs(20, 5, HERE
<< "Wont fit max(" <<
275 loadedSize
<< ", " << expectedSize
<< "): " << e
);
276 return false; // will not cache due to memory cache slot limit
// Decides whether to store completed entry e in the shared memory cache and,
// if every precondition holds, delegates to keep(). The early `return`
// statements of the guard branches are on lines missing from this extract.
283 MemStore::considerKeeping(StoreEntry
&e
)
285 if (!keepInLocalMemory(e
))
288 // since we copy everything at once, we can only keep complete entries
289 if (e
.store_status
!= STORE_OK
) {
290 debugs(20, 7, HERE
<< "Incomplete: " << e
);
296 const int64_t loadedSize
= e
.mem_obj
->endOffset();
297 const int64_t expectedSize
= e
.mem_obj
->expectedReplySize();
299 // objects of unknown size are not allowed into memory cache, for now
300 if (expectedSize
< 0) {
301 debugs(20, 5, HERE
<< "Unknown expected size: " << e
);
305 // since we copy everything at once, we can only keep fully loaded entries
306 if (loadedSize
!= expectedSize
) {
307 debugs(20, 7, HERE
<< "partially loaded: " << loadedSize
<< " != " <<
// All checks passed: attempt the actual shared-memory store.
312 keep(e
); // may still fail
// True when `need` bytes fit into a single shared memory page, the per-entry
// storage unit of this cache.
316 MemStore::willFit(int64_t need
) const
318 return need
<= static_cast<int64_t>(Ipc::Mem::PageSize());
321 /// allocates map slot and calls copyToShm to store the entry in shared memory
// NOTE(review): the no-map guard, slot-null check, and the failure path after
// copyToShm (presumably abortIo/closeForWriting on error) are on lines
// missing from this extract.
323 MemStore::keep(StoreEntry
&e
)
326 debugs(20, 5, HERE
<< "No map to mem-cache " << e
);
// Reserve a write-locked slot for this entry's key.
331 Ipc::StoreMapSlot
*slot
= map
->openForWriting(reinterpret_cast<const cache_key
*>(e
.key
), index
);
333 debugs(20, 5, HERE
<< "No room in mem-cache map to index " << e
);
337 MemStoreMap::Extras
&extras
= map
->extras(index
);
338 if (copyToShm(e
, extras
)) {
// Publish the slot (second argument false: do not mark as corrupted).
340 map
->closeForWriting(index
, false);
346 /// uses mem_hdr::copy() to copy local data to shared memory
// Acquires a cache page, copies the entry's full in-memory data into it, and
// records the page and stored size in the map extras. Returns false when no
// page is available or (presumably) when the copy is incomplete; some lines
// (returns, extras.page assignment) are missing from this extract.
348 MemStore::copyToShm(StoreEntry
&e
, MemStoreMap::Extras
&extras
)
350 Ipc::Mem::PageId page
;
351 if (!Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage
, page
)) {
352 debugs(20, 5, HERE
<< "No mem-cache page for " << e
);
353 return false; // GetPage is responsible for any cleanup on failures
356 const int64_t bufSize
= Ipc::Mem::PageSize();
357 const int64_t eSize
= e
.mem_obj
->endOffset();
359 StoreIOBuffer
sharedSpace(bufSize
, 0,
360 static_cast<char*>(PagePointer(page
)));
362 // check that we kept everything or purge incomplete/sparse cached entry
363 const ssize_t copied
= e
.mem_obj
->data_hdr
.copy(sharedSpace
);
364 if (eSize
!= copied
) {
365 debugs(20, 2, HERE
<< "Failed to mem-cache " << e
<< ": " <<
366 eSize
<< "!=" << copied
);
372 debugs(20, 7, HERE
<< "mem-cached all " << eSize
<< " bytes of " << e
<<
// Account a whole page per entry, matching cleanReadable's decrement.
375 theCurrentSize
+= Ipc::Mem::PageSize();
376 // remember storage location and size
378 extras
.storedSize
= copied
;
// Releases the shared page of the entry at `fileno` back to the page pool and
// removes its whole-page contribution from the current-size accounting
// (the mirror of copyToShm's increment).
383 MemStore::cleanReadable(const sfileno fileno
)
385 Ipc::Mem::PutPage(map
->extras(fileno
).page
);
386 theCurrentSize
-= Ipc::Mem::PageSize();
389 /// calculates maximum number of entries we need to store and map
// Returns 0 when the shared memory cache is disabled or unsized; otherwise
// one entry per page of the configured cache size (the trailing `return
// entryLimit;` line is not visible in this extract).
391 MemStore::EntryLimit()
393 if (!Config
.memShared
|| !Config
.memMaxSize
)
394 return 0; // no memory cache configured
396 const int64_t entrySize
= Ipc::Mem::PageSize(); // for now
397 const int64_t entryLimit
= Config
.memMaxSize
/ entrySize
;
401 /// reports our needs for shared memory pages to Ipc::Mem::Pages
// Registered to run at the rrClaimMemoryNeeds stage (see entry below).
402 class MemStoreClaimMemoryNeedsRr
: public RegisteredRunner
405 /* RegisteredRunner API */
406 virtual void run(const RunnerRegistry
&r
);
409 RunnerRegistrationEntry(rrClaimMemoryNeeds
, MemStoreClaimMemoryNeedsRr
);
// Tells the shared page pool how many cache pages MemStore will need
// (one page per possible entry).
412 MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry
&)
414 Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage
, MemStore::EntryLimit());
417 /// decides whether to use a shared memory cache or checks its configuration
// Registered to run at the rrFinalizeConfig stage (see entry below).
418 class MemStoreCfgRr
: public ::RegisteredRunner
421 /* RegisteredRunner API */
422 virtual void run(const RunnerRegistry
&);
425 RunnerRegistrationEntry(rrFinalizeConfig
, MemStoreCfgRr
);
// Finalizes the memory_cache_shared setting: auto-enables it when atomics,
// shared memory segments, SMP mode, and a nonzero cache size are all
// available; otherwise validates an explicit user setting and aborts on
// unsupportable combinations.
427 void MemStoreCfgRr::run(const RunnerRegistry
&r
)
429 // decide whether to use a shared memory cache if the user did not specify
430 if (!Config
.memShared
.configured()) {
431 Config
.memShared
.configure(Ipc::Atomic::Enabled() &&
432 Ipc::Mem::Segment::Enabled() && UsingSmp() &&
433 Config
.memMaxSize
> 0);
434 } else if (Config
.memShared
&& !Ipc::Atomic::Enabled()) {
435 // bail if the user wants shared memory cache but we cannot support it
436 fatal("memory_cache_shared is on, but no support for atomic operations detected");
437 } else if (Config
.memShared
&& !Ipc::Mem::Segment::Enabled()) {
438 fatal("memory_cache_shared is on, but no support for shared memory detected");
439 } else if (Config
.memShared
&& !UsingSmp()) {
// Shared cache with one worker is legal but pointless; warn only.
440 debugs(20, DBG_IMPORTANT
, "WARNING: memory_cache_shared is on, but only"
441 " a single worker is running");
445 /// initializes shared memory segments used by MemStore
// Registered to run at the rrAfterConfig stage (see entry below); `owner`
// holds the master copy of the shared map created in create().
446 class MemStoreRr
: public Ipc::Mem::RegisteredRunner
449 /* RegisteredRunner API */
450 MemStoreRr(): owner(NULL
) {}
451 virtual void run(const RunnerRegistry
&);
452 virtual ~MemStoreRr();
455 virtual void create(const RunnerRegistry
&);
458 MemStoreMap::Owner
*owner
;
461 RunnerRegistrationEntry(rrAfterConfig
, MemStoreRr
);
// Runs after configuration is finalized (so memShared must already be
// decided) and delegates to the base runner, which calls create() as needed.
463 void MemStoreRr::run(const RunnerRegistry
&r
)
465 assert(Config
.memShared
.configured());
466 Ipc::Mem::RegisteredRunner::run(r
);
// Creates the shared map segment when a shared memory cache is enabled.
// Warns when the configured size is smaller than a single page (which makes
// the entry limit zero). The early `return` after the !memShared guard is on
// a line missing from this extract.
469 void MemStoreRr::create(const RunnerRegistry
&)
471 if (!Config
.memShared
)
475 const int64_t entryLimit
= MemStore::EntryLimit();
476 if (entryLimit
<= 0) {
477 if (Config
.memMaxSize
> 0) {
478 debugs(20, DBG_IMPORTANT
, "WARNING: mem-cache size is too small ("
479 << (Config
.memMaxSize
/ 1024.0) << " KB), should be >= " <<
480 (Ipc::Mem::PageSize() / 1024.0) << " KB");
482 return; // no memory cache configured or a misconfiguration
// Allocate the master (owner) copy of the shared entry map.
484 owner
= MemStoreMap::Init(ShmLabel
, entryLimit
);
487 MemStoreRr::~MemStoreRr()