]> git.ipfire.org Git - thirdparty/squid.git/blob - src/MemStore.cc
Merged from trunk
[thirdparty/squid.git] / src / MemStore.cc
1 /*
2 * DEBUG: section 20 Memory Cache
3 *
4 */
5
6 #include "squid.h"
7 #include "base/RunnersRegistry.h"
8 #include "HttpReply.h"
9 #include "ipc/mem/Page.h"
10 #include "ipc/mem/Pages.h"
11 #include "MemObject.h"
12 #include "MemStore.h"
13 #include "mime_header.h"
14 #include "SquidConfig.h"
15 #include "StoreStats.h"
16 #include "tools.h"
17
18 /// shared memory segment path to use for MemStore maps
19 static const char *ShmLabel = "cache_mem";
20
21 // XXX: support storage using more than one page per entry
22
/// constructs an inactive store; the shared map is attached later, in init()
MemStore::MemStore(): map(NULL), theCurrentSize(0)
{
}
26
MemStore::~MemStore()
{
    // deletes only our local map object; the underlying shared segment is
    // owned and destroyed by MemStoreRr (via MemStoreMap::Owner)
    delete map;
}
31
void
MemStore::init()
{
    // do nothing unless the configuration yields a positive entry limit
    const int64_t entryLimit = EntryLimit();
    if (entryLimit <= 0)
        return; // no memory cache configured or a misconfiguration

    // warn when cache_dir limits admit objects too large to be mem-cached
    // (each mem-cache entry currently fits in a single shared memory page)
    const int64_t diskMaxSize = Store::Root().maxObjectSize();
    const int64_t memMaxSize = maxObjectSize();
    if (diskMaxSize == -1) {
        debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
               "is unlimited but mem-cache maximum object size is " <<
               memMaxSize / 1024.0 << " KB");
    } else if (diskMaxSize > memMaxSize) {
        debugs(20, DBG_IMPORTANT, "WARNING: disk-cache maximum object size "
               "is too large for mem-cache: " <<
               diskMaxSize / 1024.0 << " KB > " <<
               memMaxSize / 1024.0 << " KB");
    }

    // attach to the shared map segment (created earlier by MemStoreRr)
    map = new MemStoreMap(ShmLabel);
    map->cleaner = this; // get cleanReadable() callbacks when slots are freed
}
55
void
MemStore::getStats(StoreInfoStats &stats) const
{
    const size_t pageSize = Ipc::Mem::PageSize();

    stats.mem.shared = true;
    // report cache page pool limit and current usage, both in bytes
    stats.mem.capacity =
        Ipc::Mem::PageLimit(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.size =
        Ipc::Mem::PageLevel(Ipc::Mem::PageId::cachePage) * pageSize;
    stats.mem.count = currentCount();
}
68
void
MemStore::stat(StoreEntry &e) const
{
    storeAppendPrintf(&e, "\n\nShared Memory Cache\n");

    storeAppendPrintf(&e, "Maximum Size: %.0f KB\n", Config.memMaxSize/1024.0);

    // map is NULL when no memory cache is configured (see init())
    if (map) {
        const int limit = map->entryLimit();
        storeAppendPrintf(&e, "Maximum entries: %9d\n", limit);
        if (limit > 0) {
            storeAppendPrintf(&e, "Current entries: %" PRId64 " %.2f%%\n",
                              currentCount(), (100.0 * currentCount() / limit));

            // lock stats require iterating every slot, so only do it for
            // small maps
            if (limit < 100) { // XXX: otherwise too expensive to count
                Ipc::ReadWriteLockStats stats;
                map->updateStats(stats);
                stats.dump(e);
            }
        }
    }
}
91
void
MemStore::maintain()
{
    // nothing to do periodically; the map recycles slots as needed
}
96
uint64_t
MemStore::minSize() const
{
    return 0; // XXX: irrelevant, but Store parent forces us to implement this
}
102
uint64_t
MemStore::maxSize() const
{
    return 0; // XXX: make configurable
}
108
uint64_t
MemStore::currentSize() const
{
    // maintained by copyToShm()/cleanReadable(); one page per cached entry
    return theCurrentSize;
}
114
115 uint64_t
116 MemStore::currentCount() const
117 {
118 return map ? map->entryCount() : 0;
119 }
120
int64_t
MemStore::maxObjectSize() const
{
    // each entry is stored in exactly one shared memory page (for now)
    return Ipc::Mem::PageSize();
}
126
void
MemStore::reference(StoreEntry &)
{
    // no reference-tracking needed; the shared map keeps its own accounting
}
131
bool
MemStore::dereference(StoreEntry &, bool)
{
    // no need to keep e in the global store_table for us; we have our own map
    return false;
}
138
int
MemStore::callback()
{
    return 0; // no pending events to dispatch
}
144
StoreSearch *
MemStore::search(String const, HttpRequest *)
{
    // iteration over mem-cached entries is not supported
    fatal("not implemented");
    return NULL;
}
151
StoreEntry *
MemStore::get(const cache_key *key)
{
    if (!map)
        return NULL; // no memory cache configured

    // XXX: replace sfileno with a bigger word (sfileno is only for cache_dirs)
    sfileno index;
    // lock the slot for reading while we copy its contents to local RAM
    const Ipc::StoreMapSlot *const slot = map->openForReading(key, index);
    if (!slot)
        return NULL; // miss (or the entry is being written/freed)

    const Ipc::StoreMapSlot::Basics &basics = slot->basics;
    const MemStoreMap::Extras &extras = map->extras(index);

    // create a brand new store entry and initialize it with stored info
    StoreEntry *e = new StoreEntry();
    e->lock_count = 0;

    // restore the metadata snapshot taken by StoreMapSlot::set() at keep() time
    e->swap_file_sz = basics.swap_file_sz;
    e->lastref = basics.lastref;
    e->timestamp = basics.timestamp;
    e->expires = basics.expires;
    e->lastmod = basics.lastmod;
    e->refcount = basics.refcount;
    e->flags = basics.flags;

    e->store_status = STORE_OK;
    e->mem_status = IN_MEMORY; // setMemStatus(IN_MEMORY) requires mem_obj
    //e->swap_status = set in StoreEntry constructor to SWAPOUT_NONE;
    e->ping_status = PING_NONE;

    // mark the local copy as a valid, shareable cache hit
    EBIT_SET(e->flags, ENTRY_CACHABLE);
    EBIT_CLR(e->flags, RELEASE_REQUEST);
    EBIT_CLR(e->flags, KEY_PRIVATE);
    EBIT_SET(e->flags, ENTRY_VALIDATED);

    const bool copied = copyFromShm(*e, extras);

    // we copied everything we could to local memory; no more need to lock
    map->closeForReading(index);

    if (copied) {
        e->hashInsert(key);
        return e;
    }

    debugs(20, 3, HERE << "mem-loading failed; freeing " << index);
    map->free(index); // do not let others into the same trap
    return NULL;
}
203
void
MemStore::get(String const key, STOREGETCLIENT aCallback, void *aCallbackData)
{
    // XXX: not needed but Store parent forces us to implement this
    fatal("MemStore::get(key,callback,data) should not be called");
}
210
211 bool
212 MemStore::copyFromShm(StoreEntry &e, const MemStoreMap::Extras &extras)
213 {
214 const Ipc::Mem::PageId &page = extras.page;
215
216 StoreIOBuffer sourceBuf(extras.storedSize, 0,
217 static_cast<char*>(PagePointer(page)));
218
219 // XXX: We do not know the URLs yet, only the key, but we need to parse and
220 // store the response for the Root().get() callers to be happy because they
221 // expect IN_MEMORY entries to already have the response headers and body.
222 // At least one caller calls createMemObject() if there is not one, so
223 // we hide the true object until that happens (to avoid leaking TBD URLs).
224 e.createMemObject("TBD", "TBD");
225
226 // emulate the usual Store code but w/o inapplicable checks and callbacks:
227
228 // from store_client::readBody():
229 HttpReply *rep = (HttpReply *)e.getReply();
230 const ssize_t end = headersEnd(sourceBuf.data, sourceBuf.length);
231 if (!rep->parseCharBuf(sourceBuf.data, end)) {
232 debugs(20, DBG_IMPORTANT, "Could not parse mem-cached headers: " << e);
233 return false;
234 }
235 // local memory stores both headers and body
236 e.mem_obj->object_sz = sourceBuf.length; // from StoreEntry::complete()
237
238 storeGetMemSpace(sourceBuf.length); // from StoreEntry::write()
239
240 assert(e.mem_obj->data_hdr.write(sourceBuf)); // from MemObject::write()
241 const int64_t written = e.mem_obj->endOffset();
242 // we should write all because StoreEntry::write() never fails
243 assert(written >= 0 &&
244 static_cast<size_t>(written) == sourceBuf.length);
245 // would be nice to call validLength() here, but it needs e.key
246
247 debugs(20, 7, HERE << "mem-loaded all " << written << " bytes of " << e <<
248 " from " << page);
249
250 e.hideMemObject();
251
252 return true;
253 }
254
255 bool
256 MemStore::keepInLocalMemory(const StoreEntry &e) const
257 {
258 if (!e.memoryCachable()) {
259 debugs(20, 7, HERE << "Not memory cachable: " << e);
260 return false; // will not cache due to entry state or properties
261 }
262
263 assert(e.mem_obj);
264 const int64_t loadedSize = e.mem_obj->endOffset();
265 const int64_t expectedSize = e.mem_obj->expectedReplySize(); // may be < 0
266 const int64_t ramSize = max(loadedSize, expectedSize);
267
268 if (ramSize > static_cast<int64_t>(Config.Store.maxInMemObjSize)) {
269 debugs(20, 5, HERE << "Too big max(" <<
270 loadedSize << ", " << expectedSize << "): " << e);
271 return false; // will not cache due to cachable entry size limits
272 }
273
274 if (!willFit(ramSize)) {
275 debugs(20, 5, HERE << "Wont fit max(" <<
276 loadedSize << ", " << expectedSize << "): " << e);
277 return false; // will not cache due to memory cache slot limit
278 }
279
280 return true;
281 }
282
/// decides whether the completed entry qualifies for mem-caching and, if so,
/// tries to store it via keep()
void
MemStore::considerKeeping(StoreEntry &e)
{
    if (!keepInLocalMemory(e))
        return;

    // since we copy everything at once, we can only keep complete entries
    if (e.store_status != STORE_OK) {
        debugs(20, 7, HERE << "Incomplete: " << e);
        return;
    }

    assert(e.mem_obj);

    const int64_t loadedSize = e.mem_obj->endOffset();
    const int64_t expectedSize = e.mem_obj->expectedReplySize();

    // objects of unknown size are not allowed into memory cache, for now
    if (expectedSize < 0) {
        debugs(20, 5, HERE << "Unknown expected size: " << e);
        return;
    }

    // since we copy everything at once, we can only keep fully loaded entries
    if (loadedSize != expectedSize) {
        debugs(20, 7, HERE << "partially loaded: " << loadedSize << " != " <<
               expectedSize);
        return;
    }

    if (e.mem_obj->vary_headers) {
        // XXX: We must store/load SerialisedMetaData to cache Vary in RAM
        debugs(20, 5, "Vary not yet supported: " << e.mem_obj->vary_headers);
        return;
    }

    keep(e); // may still fail
}
321
322 bool
323 MemStore::willFit(int64_t need) const
324 {
325 return need <= static_cast<int64_t>(Ipc::Mem::PageSize());
326 }
327
/// allocates map slot and calls copyToShm to store the entry in shared memory
void
MemStore::keep(StoreEntry &e)
{
    if (!map) {
        debugs(20, 5, HERE << "No map to mem-cache " << e);
        return;
    }

    // lock a fresh slot for writing under the entry's key
    sfileno index = 0;
    Ipc::StoreMapSlot *slot = map->openForWriting(reinterpret_cast<const cache_key *>(e.key), index);
    if (!slot) {
        debugs(20, 5, HERE << "No room in mem-cache map to index " << e);
        return;
    }

    MemStoreMap::Extras &extras = map->extras(index);
    if (copyToShm(e, extras)) {
        // record entry metadata and publish the slot to readers
        slot->set(e);
        map->closeForWriting(index, false);
    } else {
        map->abortIo(index); // release the never-published slot
    }
}
352
/// uses mem_hdr::copy() to copy local data to shared memory
bool
MemStore::copyToShm(StoreEntry &e, MemStoreMap::Extras &extras)
{
    // acquire a shared page to hold the entire entry
    Ipc::Mem::PageId page;
    if (!Ipc::Mem::GetPage(Ipc::Mem::PageId::cachePage, page)) {
        debugs(20, 5, HERE << "No mem-cache page for " << e);
        return false; // GetPage is responsible for any cleanup on failures
    }

    const int64_t bufSize = Ipc::Mem::PageSize();
    const int64_t eSize = e.mem_obj->endOffset();

    StoreIOBuffer sharedSpace(bufSize, 0,
                              static_cast<char*>(PagePointer(page)));

    // check that we kept everything or purge incomplete/sparse cached entry
    const ssize_t copied = e.mem_obj->data_hdr.copy(sharedSpace);
    if (eSize != copied) {
        debugs(20, 2, HERE << "Failed to mem-cache " << e << ": " <<
               eSize << "!=" << copied);
        // cleanup: return the unused page to the pool
        PutPage(page);
        return false;
    }

    debugs(20, 7, HERE << "mem-cached all " << eSize << " bytes of " << e <<
           " in " << page);

    // account for the whole page, not just the bytes actually used
    theCurrentSize += Ipc::Mem::PageSize();
    // remember storage location and size
    extras.page = page;
    extras.storedSize = copied;
    return true;
}
388
/// map cleaner callback (see init()): releases the freed slot's shared page
/// and adjusts our size accounting
void
MemStore::cleanReadable(const sfileno fileno)
{
    Ipc::Mem::PutPage(map->extras(fileno).page);
    theCurrentSize -= Ipc::Mem::PageSize();
}
395
396 /// calculates maximum number of entries we need to store and map
397 int64_t
398 MemStore::EntryLimit()
399 {
400 if (!Config.memShared || !Config.memMaxSize)
401 return 0; // no memory cache configured
402
403 const int64_t entrySize = Ipc::Mem::PageSize(); // for now
404 const int64_t entryLimit = Config.memMaxSize / entrySize;
405 return entryLimit;
406 }
407
/// reports our needs for shared memory pages to Ipc::Mem::Pages
class MemStoreClaimMemoryNeedsRr: public RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &r);
};
415
416 RunnerRegistrationEntry(rrClaimMemoryNeeds, MemStoreClaimMemoryNeedsRr);
417
void
MemStoreClaimMemoryNeedsRr::run(const RunnerRegistry &)
{
    // reserve one cache page per potential map entry
    Ipc::Mem::NotePageNeed(Ipc::Mem::PageId::cachePage, MemStore::EntryLimit());
}
423
/// decides whether to use a shared memory cache or checks its configuration
class MemStoreCfgRr: public ::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    virtual void run(const RunnerRegistry &);
};
431
432 RunnerRegistrationEntry(rrFinalizeConfig, MemStoreCfgRr);
433
void MemStoreCfgRr::run(const RunnerRegistry &r)
{
    // decide whether to use a shared memory cache if the user did not specify
    if (!Config.memShared.configured()) {
        // default to enabled only when atomics and shared segments are
        // supported, multiple workers run, and a memory cache is wanted
        Config.memShared.configure(Ipc::Atomic::Enabled() &&
                                   Ipc::Mem::Segment::Enabled() && UsingSmp() &&
                                   Config.memMaxSize > 0);
    } else if (Config.memShared && !Ipc::Atomic::Enabled()) {
        // bail if the user wants shared memory cache but we cannot support it
        fatal("memory_cache_shared is on, but no support for atomic operations detected");
    } else if (Config.memShared && !Ipc::Mem::Segment::Enabled()) {
        fatal("memory_cache_shared is on, but no support for shared memory detected");
    } else if (Config.memShared && !UsingSmp()) {
        // a single worker can still run with a shared cache; just warn
        debugs(20, DBG_IMPORTANT, "WARNING: memory_cache_shared is on, but only"
               " a single worker is running");
    }
}
451
/// initializes shared memory segments used by MemStore
class MemStoreRr: public Ipc::Mem::RegisteredRunner
{
public:
    /* RegisteredRunner API */
    MemStoreRr(): owner(NULL) {}
    virtual void run(const RunnerRegistry &);
    virtual ~MemStoreRr();

protected:
    virtual void create(const RunnerRegistry &);

private:
    MemStoreMap::Owner *owner; ///< shared map segment ownership; see create()
};
467
468 RunnerRegistrationEntry(rrAfterConfig, MemStoreRr);
469
void MemStoreRr::run(const RunnerRegistry &r)
{
    // by now, the rrFinalizeConfig runner (MemStoreCfgRr) must have decided
    // whether a shared memory cache is in use
    assert(Config.memShared.configured());
    Ipc::Mem::RegisteredRunner::run(r);
}
475
void MemStoreRr::create(const RunnerRegistry &)
{
    if (!Config.memShared)
        return; // shared memory cache is disabled

    Must(!owner); // create() must run at most once
    const int64_t entryLimit = MemStore::EntryLimit();
    if (entryLimit <= 0) {
        // a configured-but-too-small cache deserves a warning; a zero-sized
        // one is simply off
        if (Config.memMaxSize > 0) {
            debugs(20, DBG_IMPORTANT, "WARNING: mem-cache size is too small ("
                   << (Config.memMaxSize / 1024.0) << " KB), should be >= " <<
                   (Ipc::Mem::PageSize() / 1024.0) << " KB");
        }
        return; // no memory cache configured or a misconfiguration
    }
    // create the shared map segment that MemStore::init() will attach to
    owner = MemStoreMap::Init(ShmLabel, entryLimit);
}
493
MemStoreRr::~MemStoreRr()
{
    delete owner; // destroys the shared map segment we created (if any)
}