/*
 * $Id$
 *
 * DEBUG: section 20    Memory Cache
 *
 */

#include "squid.h"
#include "base/RunnersRegistry.h"
#include "HttpReply.h"
#include "ipc/mem/Page.h"
#include "ipc/mem/Pages.h"
#include "MemObject.h"
#include "MemStore.h"
#include "StoreStats.h"

/// shared memory segment path to use for MemStore maps
static const char *ShmLabel = "cache_mem";

// XXX: support storage using more than one page per entry

MemStore::MemStore(): map(NULL), theCurrentSize(0)
{
}

MemStore::~MemStore()
{
    delete map;
}

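/// initializes the shared entry map if a memory cache is configured; also
/// warns when the disk cache admits objects too large for the memory cache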
void
MemStore::init()
{
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    const int64_t diskMaxSize = Store::Root().maxObjectSize();
    const int64_t memMaxSize = maxObjectSize();
    if (diskMaxSize == -1) {
        debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
               "is unlimited but mem-cache maximum object size is " <<
               memMaxSize / 1024.0 << " KB");
    } else if (diskMaxSize > memMaxSize) {
        debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
               "is too large for mem-cache: " <<
               diskMaxSize / 1024.0 << " KB > " <<
               memMaxSize / 1024.0 << " KB");
    }

    map = new MemStoreMap(ShmLabel);
    map->cleaner = this;
}

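/// reports shared memory page capacity, current usage, and entry count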
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}

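/// appends a human-readable shared memory cache summary to the given entry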
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", Config.memMaxSize/1024.0);

    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));

            if (limit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}

void
MemStore::maintain()
{
}

uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}

uint64_t
MemStore::maxSize() const
{
    return 0; // XXX: make configurable
}

uint64_t
MemStore::currentSize() const
{
    return theCurrentSize;
}

uint64_t
MemStore::currentCount() const
{
    return map ? map->entryCount() : 0;
}

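/// the largest entry we can cache: one shared memory page, because each
/// entry currently occupies at most a single page (see the XXX above)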
int64_t
MemStore::maxObjectSize() const
{
    return Ipc::Mem::PageSize();
}

void
MemStore::reference(StoreEntry &)
{
}

bool
MemStore::dereference(StoreEntry &)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}

int
MemStore::callback()
{
    return 0;
}

StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    fatal("not implemented");
    return NULL;
}

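/// looks up the key in the shared map and, on a hit, copies the stored
/// response into a brand new local StoreEntry; returns NULL otherwise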
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL;

    // XXX: replace sfileno with a bigger word (sfileno is only for cache_dirs)
    sfileno index;
    const Ipc::StoreMapSlot *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL;

    const Ipc::StoreMapSlot::Basics &basics = slot->basics;
    const MemStoreMap::Extras &extras = map->extras(index);

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();
    e->lock_count = 0;

    e->swap_file_sz = basics.swap_file_sz;
    e->lastref = basics.lastref;
    e->timestamp = basics.timestamp;
    e->expires = basics.expires;
    e->lastmod = basics.lastmod;
    e->refcount = basics.refcount;
    e->flags = basics.flags;

    e->store_status = STORE_OK;
    e->mem_status = IN_MEMORY; // setMemStatus(IN_MEMORY) requires mem_obj
    //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
    e->ping_status = PING_NONE;

    EBIT_SET(e->flags, ENTRY_CACHABLE);
    EBIT_CLR(e->flags, RELEASE_REQUEST);
    EBIT_CLR(e->flags, KEY_PRIVATE);
    EBIT_SET(e->flags, ENTRY_VALIDATED);

    const bool copied = copyFromShm(*e, extras);

    // we copied everything we could to local memory; no more need to lock
    map->closeForReading(index);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->free(index); // do not let others into the same trap
    return NULL;
}

void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}

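/// copies the shared memory page contents (response headers and body)
/// into the local memory of the given entry; returns false on failures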
bool
MemStore::copyFromShm(StoreEntry &e, const MemStoreMap::Extras &extras)
{
    const Ipc::Mem::PageId &page = extras.page;

    StoreIOBuffer sourceBuf(extras.storedSize, 0,
                            static_cast<char*>(PagePointer(page)));

    // XXX: We do not know the URLs yet, only the key, but we need to parse and
    // store the response for the Root().get() callers to be happy because they
    // expect IN_MEMORY entries to already have the response headers and body.
    // At least one caller calls createMemObject() if there is not one, so
    // we hide the true object until that happens (to avoid leaking TBD URLs).
    e.createMemObject("TBD", "TBD");

    // emulate the usual Store code but w/o inapplicable checks and callbacks:

    // from store_client::readBody():
    HttpReply *rep = (HttpReply *)e.getReply();
    const ssize_t end = headersEnd(sourceBuf.data, sourceBuf.length);
    if (!rep->parseCharBuf(sourceBuf.data, end)) {
        debugs(20, DBG_IMPORTANT, "Could not parse mem-cached headers: " << e);
        return false;
    }
    // local memory stores both headers and body
    e.mem_obj->object_sz = sourceBuf.length; // from StoreEntry::complete()

    storeGetMemSpace(sourceBuf.length); // from StoreEntry::write()

    assert(e.mem_obj->data_hdr.write(sourceBuf)); // from MemObject::write()
    const int64_t written = e.mem_obj->endOffset();
    // we should write all because StoreEntry::write() never fails
    assert(written >= 0 &&
           static_cast<size_t>(written) == sourceBuf.length);
    // would be nice to call validLength() here, but it needs e.key

    debugs(20, 7, HERE << "mem-loaded all " << written << " bytes of " << e <<
           " from " << page);

    e.hideMemObject();

    return true;
}

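/// whether the entry may be stored in the memory cache, judging by entry
/// properties, its (expected) size, and the single-page entry size limit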
bool
MemStore::keepInLocalMemory(const StoreEntry &e) const
{
    if (!e.memoryCachable()) {
        debugs(20, 7, HERE << "Not memory cachable: " << e);
        return false; // will not cache due to entry state or properties
    }

    assert(e.mem_obj);
    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
    const int64_t ramSize = max(loadedSize, expectedSize);

    if (ramSize > static_cast<int64_t>(Config.Store.maxInMemObjSize)) {
        debugs(20, 5, HERE << "Too big max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to cachable entry size limits
    }

    if (!willFit(ramSize)) {
        debugs(20, 5, HERE << "Won't fit max(" <<
               loadedSize << ", " << expectedSize << "): " << e);
        return false; // will not cache due to memory cache slot limit
    }

    return true;
}

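/// stores the entry in shared memory if it is complete, fully loaded, and
/// passes the keepInLocalMemory() checks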
void
MemStore::considerKeeping(StoreEntry &e)
{
    if (!keepInLocalMemory(e))
        return;

    // since we copy everything at once, we can only keep complete entries
    if (e.store_status != STORE_OK) {
        debugs(20, 7, HERE << "Incomplete: " << e);
        return;
    }

    assert(e.mem_obj);

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize();

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, HERE << "Unknown expected size: " << e);
        return;
    }

    // since we copy everything at once, we can only keep fully loaded entries
    if (loadedSize != expectedSize) {
        debugs(20, 7, HERE << "partially loaded: " << loadedSize << " != " <<
               expectedSize);
        return;
    }

    keep(e); // may still fail
}

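/// whether an object of the given size fits in one shared memory page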
bool
MemStore::willFit(int64_t need) const
{
    return need <= static_cast<int64_t>(Ipc::Mem::PageSize());
}

/// allocates map slot and calls copyToShm to store the entry in shared memory
void
MemStore::keep(StoreEntry &e)
{
    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return;
    }

    sfileno index = 0;
    Ipc::StoreMapSlot *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return;
    }

    MemStoreMap::Extras &extras = map->extras(index);
    if (copyToShm(e, extras)) {
        slot->set(e);
        map->closeForWriting(index, false);
    } else {
        map->abortIo(index);
    }
}

/// uses mem_hdr::copy() to copy local data to shared memory
bool
MemStore::copyToShm(StoreEntry &e, MemStoreMap::Extras &extras)
{
    Ipc::Mem::PageId page;
    if (!Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
        debugs(20, 5, HERE << "No mem-cache page for " << e);
        return false; // GetPage is responsible for any cleanup on failures
    }

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t eSize = e.mem_obj->endOffset();

    StoreIOBuffer sharedSpace(bufSize, 0,
                              static_cast<char*>(PagePointer(page)));

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (eSize != copied) {
        debugs(20, 2, HERE << "Failed to mem-cache " << e << ": " <<
               eSize << "!=" << copied);
        // cleanup
        PutPage(page);
        return false;
    }

    debugs(20, 7, HERE << "mem-cached all " << eSize << " bytes of " << e <<
           " in " << page);

    theCurrentSize += Ipc::Mem::PageSize();
    // remember storage location and size
    extras.page = page;
    extras.storedSize = copied;
    return true;
}

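/// StoreMapCleaner API: called when a map slot is freed; returns the slot
/// data page to the shared page pool and adjusts the current cache size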
void
MemStore::cleanReadable(const sfileno fileno)
{
    Ipc::Mem::PutPage(map->extras(fileno).page);
    theCurrentSize -= Ipc::Mem::PageSize();
}

/// calculates maximum number of entries we need to store and map
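/// For example, assuming the default 32 KB shared page size, a 256 MB
/// cache_mem would be mapped as 256*1024/32 = 8192 single-page entries.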
int64_t
MemStore::EntryLimit()
{
    if (!Config.memShared || !Config.memMaxSize)
        return 0; // no memory cache configured

    const int64_t entrySize = Ipc::Mem::PageSize(); // for now
    const int64_t entryLimit = Config.memMaxSize / entrySize;
    return entryLimit;
}

/// reports our needs for shared memory pages to Ipc::Mem::Pages
class MemStoreClaimMemoryNeedsRr: public RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &r);
};

RunnerRegistrationEntry(rrClaimMemoryNeeds, MemStoreClaimMemoryNeedsRr);

void
MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry &)
{
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}

/// decides whether to use a shared memory cache or checks its configuration
class MemStoreCfgRr: public ::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &);
};

RunnerRegistrationEntry(rrFinalizeConfig, MemStoreCfgRr);

void MemStoreCfgRr::run(const RunnerRegistry &)
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}

/// initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): owner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~MemStoreRr();

protected:
    virtual void create(const RunnerRegistry &);

private:
    MemStoreMap::Owner *owner;
};

RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);

void MemStoreRr::run(const RunnerRegistry &r)
{
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r);
}

void MemStoreRr::create(const RunnerRegistry &)
{
    if (!Config.memShared)
        return;

    Must(!owner);
    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }
    owner = MemStoreMap::Init(ShmLabel, entryLimit);
}

MemStoreRr::~MemStoreRr()
{
    delete owner;
}